ia64/xen-unstable

changeset 9934:51484df99be1

Merged.
author emellor@leeni.uk.xensource.com
date Thu May 04 14:19:19 2006 +0100 (2006-05-04)
parents 55f73916d319 1a84eec74331
children 47dede68bbed
files linux-2.6-xen-sparse/drivers/xen/tpmfront/Makefile linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h linux-2.6-xen-sparse/include/xen/tpmfe.h
line diff
     1.1 --- a/.hgignore	Tue May 02 18:17:59 2006 +0100
     1.2 +++ b/.hgignore	Thu May 04 14:19:19 2006 +0100
     1.3 @@ -14,6 +14,7 @@
     1.4  .*\.orig$
     1.5  .*\.rej$
     1.6  .*/a\.out$
     1.7 +.*cscope\.*$
     1.8  ^[^/]*\.bz2$
     1.9  ^TAGS$
    1.10  ^dist/.*$
    1.11 @@ -184,7 +185,6 @@
    1.12  ^tools/xm-test/ramdisk/buildroot
    1.13  ^xen/BLOG$
    1.14  ^xen/TAGS$
    1.15 -^xen/cscope\.*$
    1.16  ^xen/arch/x86/asm-offsets\.s$
    1.17  ^xen/arch/x86/boot/mkelf32$
    1.18  ^xen/arch/x86/xen\.lds$
     2.1 --- a/docs/misc/vtpm.txt	Tue May 02 18:17:59 2006 +0100
     2.2 +++ b/docs/misc/vtpm.txt	Thu May 04 14:19:19 2006 +0100
     2.3 @@ -21,11 +21,11 @@ Compile the XEN tree as usual after the 
     2.4  linux-2.6.??-xen/.config file:
     2.5  
     2.6  CONFIG_XEN_TPMDEV_BACKEND=y
     2.7 -CONFIG_XEN_TPMDEV_GRANT=y
     2.8  
     2.9 -CONFIG_TCG_TPM=m
    2.10 +CONFIG_TCG_TPM=y
    2.11  CONFIG_TCG_NSC=m
    2.12  CONFIG_TCG_ATMEL=m
    2.13 +CONFIG_TCG_XEN=y
    2.14  
    2.15  You must also enable the virtual TPM to be built:
    2.16  
    2.17 @@ -33,6 +33,12 @@ In Config.mk in the Xen root directory s
    2.18  
    2.19  VTPM_TOOLS ?= y
    2.20  
    2.21 +and in
    2.22 +
    2.23 +tools/vtpm/Rules.mk set the line
    2.24 +
    2.25 +BUILD_EMULATOR = y
    2.26 +
    2.27  Now build the Xen sources from Xen's root directory:
    2.28  
    2.29  make install
     3.1 --- a/extras/mini-os/Makefile	Tue May 02 18:17:59 2006 +0100
     3.2 +++ b/extras/mini-os/Makefile	Thu May 04 14:19:19 2006 +0100
     3.3 @@ -60,4 +60,12 @@ clean:
     3.4  %.o: %.S $(HDRS) Makefile
     3.5  	$(CC) $(CFLAGS) -D__ASSEMBLY__ -c $< -o $@
     3.6  
     3.7 +define all_sources
     3.8 +     ( find . -follow -name SCCS -prune -o -name '*.[chS]' -print )
     3.9 +endef
    3.10  
    3.11 +.PHONY: cscope
    3.12 +cscope:
    3.13 +	$(all_sources) > cscope.files
    3.14 +	cscope -k -b -q
    3.15 +
     4.1 --- a/extras/mini-os/include/lib.h	Tue May 02 18:17:59 2006 +0100
     4.2 +++ b/extras/mini-os/include/lib.h	Thu May 04 14:19:19 2006 +0100
     4.3 @@ -56,6 +56,7 @@
     4.4  #define _LIB_H_
     4.5  
     4.6  #include <stdarg.h>
     4.7 +#include <stddef.h>
     4.8  #include <console.h>
     4.9  
    4.10  /* printing */
     5.1 --- a/extras/mini-os/include/os.h	Tue May 02 18:17:59 2006 +0100
     5.2 +++ b/extras/mini-os/include/os.h	Thu May 04 14:19:19 2006 +0100
     5.3 @@ -7,9 +7,6 @@
     5.4  #ifndef _OS_H_
     5.5  #define _OS_H_
     5.6  
     5.7 -#define NULL 0
     5.8 -
     5.9 -
    5.10  #if __GNUC__ == 2 && __GNUC_MINOR__ < 96
    5.11  #define __builtin_expect(x, expected_value) (x)
    5.12  #endif
     6.1 --- a/extras/mini-os/include/types.h	Tue May 02 18:17:59 2006 +0100
     6.2 +++ b/extras/mini-os/include/types.h	Thu May 04 14:19:19 2006 +0100
     6.3 @@ -34,8 +34,6 @@ typedef signed long         s64;
     6.4  typedef unsigned long       u64;
     6.5  #endif
     6.6  
     6.7 -typedef unsigned int        size_t;
     6.8 -
     6.9  /* FreeBSD compat types */
    6.10  typedef unsigned char       u_char;
    6.11  typedef unsigned int        u_int;
     7.1 --- a/extras/mini-os/sched.c	Tue May 02 18:17:59 2006 +0100
     7.2 +++ b/extras/mini-os/sched.c	Thu May 04 14:19:19 2006 +0100
     7.3 @@ -324,7 +324,7 @@ void th_f2(void *data)
     7.4  
     7.5  void init_sched(void)
     7.6  {
     7.7 -    printk("Initialising scheduler, idle_thread %p\n", idle_thread);
     7.8 +    printk("Initialising scheduler\n");
     7.9  
    7.10      idle_thread = create_thread("Idle", idle_thread_fn, NULL);
    7.11      INIT_LIST_HEAD(&idle_thread->thread_list);
     8.1 --- a/extras/mini-os/traps.c	Tue May 02 18:17:59 2006 +0100
     8.2 +++ b/extras/mini-os/traps.c	Thu May 04 14:19:19 2006 +0100
     8.3 @@ -123,8 +123,13 @@ void page_walk(unsigned long virt_addres
     8.4  void do_page_fault(struct pt_regs *regs, unsigned long error_code)
     8.5  {
     8.6      unsigned long addr = read_cr2();
     8.7 -    printk("Page fault at linear address %p, regs %p, code %lx\n", addr, regs,
     8.8 -	   error_code);
     8.9 +#if defined(__x86_64__)
    8.10 +    printk("Page fault at linear address %p, rip %p, code %lx\n",
    8.11 +           addr, regs->rip, error_code);
    8.12 +#else
    8.13 +    printk("Page fault at linear address %p, eip %p, code %lx\n",
    8.14 +           addr, regs->eip, error_code);
    8.15 +#endif
    8.16      dump_regs(regs);
    8.17      page_walk(addr);
    8.18      do_exit();
    8.19 @@ -195,7 +200,6 @@ static trap_info_t trap_table[] = {
    8.20      { 15, 0, __KERNEL_CS, (unsigned long)spurious_interrupt_bug      },
    8.21      { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error           },
    8.22      { 17, 0, __KERNEL_CS, (unsigned long)alignment_check             },
    8.23 -    { 18, 0, __KERNEL_CS, (unsigned long)machine_check               },
    8.24      { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error      },
    8.25      {  0, 0,           0, 0                           }
    8.26  };
     9.1 --- a/extras/mini-os/x86_32.S	Tue May 02 18:17:59 2006 +0100
     9.2 +++ b/extras/mini-os/x86_32.S	Thu May 04 14:19:19 2006 +0100
     9.3 @@ -30,10 +30,10 @@ shared_info:
     9.4  hypercall_page:
     9.5          .org 0x3000
     9.6  
     9.7 -ES		= 0x1c
     9.8 -ORIG_EAX	= 0x20
     9.9 -EIP		= 0x24
    9.10 -CS		= 0x28
    9.11 +ES		= 0x20
    9.12 +ORIG_EAX	= 0x24
    9.13 +EIP		= 0x28
    9.14 +CS		= 0x2C
    9.15  
    9.16  #define ENTRY(X) .globl X ; X :
    9.17  
    9.18 @@ -69,7 +69,7 @@ ENTRY(divide_error)
    9.19  	pushl $0		# no error code
    9.20  	pushl $do_divide_error
    9.21  do_exception:
    9.22 -	pushl %ds
    9.23 +    pushl %ds
    9.24  	pushl %eax
    9.25  	xorl %eax, %eax
    9.26  	pushl %ebp
    9.27 @@ -92,7 +92,7 @@ do_exception:
    9.28      pushl %edx
    9.29      pushl %eax
    9.30  	call *%edi
    9.31 -    addl $8,%esp
    9.32 +    jmp ret_from_exception
    9.33      
    9.34  ret_from_exception:
    9.35          movb CS(%esp),%cl
    9.36 @@ -223,70 +223,55 @@ ENTRY(invalid_op)
    9.37  	pushl $do_invalid_op
    9.38  	jmp do_exception
    9.39  
    9.40 +
    9.41  ENTRY(coprocessor_segment_overrun)
    9.42  	pushl $0
    9.43  	pushl $do_coprocessor_segment_overrun
    9.44  	jmp do_exception
    9.45  
    9.46 +
    9.47  ENTRY(invalid_TSS)
    9.48  	pushl $do_invalid_TSS
    9.49  	jmp do_exception
    9.50  
    9.51 +
    9.52  ENTRY(segment_not_present)
    9.53  	pushl $do_segment_not_present
    9.54  	jmp do_exception
    9.55  
    9.56 +
    9.57  ENTRY(stack_segment)
    9.58  	pushl $do_stack_segment
    9.59  	jmp do_exception
    9.60  
    9.61 +
    9.62  ENTRY(general_protection)
    9.63  	pushl $do_general_protection
    9.64  	jmp do_exception
    9.65  
    9.66 +
    9.67  ENTRY(alignment_check)
    9.68  	pushl $do_alignment_check
    9.69  	jmp do_exception
    9.70  
    9.71 -# This handler is special, because it gets an extra value on its stack,
    9.72 -# which is the linear faulting address.
    9.73 -# fastcall register usage:  %eax = pt_regs, %edx = error code,
    9.74 -#			    %ecx = fault address
    9.75 +
    9.76  ENTRY(page_fault)
    9.77 -	pushl %ds
    9.78 -	pushl %eax
    9.79 -	xorl %eax, %eax
    9.80 -	pushl %ebp
    9.81 -	pushl %edi
    9.82 -	pushl %esi
    9.83 -	pushl %edx
    9.84 -	decl %eax			/* eax = -1 */
    9.85 -	pushl %ecx
    9.86 -	pushl %ebx
    9.87 -	cld
    9.88 -	movl ORIG_EAX(%esp), %edi
    9.89 -	movl %eax, ORIG_EAX(%esp)
    9.90 -	movl %es, %ecx
    9.91 -	movl %ecx, ES(%esp)
    9.92 -	movl $(__KERNEL_DS),%eax
    9.93 -	movl %eax, %ds
    9.94 -	movl %eax, %es
    9.95 -	pushl %edi
    9.96 -	movl %esp, %eax
    9.97 -	pushl %eax
    9.98 -	call do_page_fault
    9.99 -	jmp ret_from_exception
   9.100 -
   9.101 +    pushl $do_page_fault
   9.102 +    jmp do_exception
   9.103 +    
   9.104  ENTRY(machine_check)
   9.105  	pushl $0
   9.106  	pushl $do_machine_check
   9.107  	jmp do_exception
   9.108  
   9.109 +
   9.110  ENTRY(spurious_interrupt_bug)
   9.111  	pushl $0
   9.112  	pushl $do_spurious_interrupt_bug
   9.113  	jmp do_exception
   9.114  
   9.115 +
   9.116 +
   9.117  ENTRY(thread_starter)
   9.118      popl %eax
   9.119      popl %ebx
    10.1 --- a/extras/mini-os/x86_64.S	Tue May 02 18:17:59 2006 +0100
    10.2 +++ b/extras/mini-os/x86_64.S	Thu May 04 14:19:19 2006 +0100
    10.3 @@ -13,40 +13,6 @@
    10.4  #define ENTRY(X) .globl X ; X :
    10.5  .globl _start, shared_info, hypercall_page
    10.6  
    10.7 -#define SAVE_ALL \
    10.8 -        cld; \
    10.9 -        pushq %rdi; \
   10.10 -        pushq %rsi; \
   10.11 -        pushq %rdx; \
   10.12 -        pushq %rcx; \
   10.13 -        pushq %rax; \
   10.14 -        pushq %r8; \
   10.15 -        pushq %r9; \
   10.16 -        pushq %r10; \
   10.17 -        pushq %r11; \
   10.18 -        pushq %rbx; \
   10.19 -        pushq %rbp; \
   10.20 -        pushq %r12; \
   10.21 -        pushq %r13; \
   10.22 -        pushq %r14; \
   10.23 -        pushq %r15;
   10.24 -
   10.25 -#define RESTORE_ALL \
   10.26 -        popq  %r15; \
   10.27 -        popq  %r14; \
   10.28 -        popq  %r13; \
   10.29 -        popq  %r12; \
   10.30 -        popq  %rbp; \
   10.31 -        popq  %rbx; \
   10.32 -        popq  %r11; \
   10.33 -        popq  %r10; \
   10.34 -        popq  %r9; \
   10.35 -        popq  %r8; \
   10.36 -        popq  %rax; \
   10.37 -        popq  %rcx; \
   10.38 -        popq  %rdx; \
   10.39 -        popq  %rsi; \
   10.40 -        popq  %rdi
   10.41  
   10.42  _start:
   10.43          cld
   10.44 @@ -240,7 +206,17 @@ error_call_handler:
   10.45  #	CFI_ENDPROC
   10.46  .endm	
   10.47  
   10.48 -
   10.49 +.macro errorentry sym
   10.50 +#	XCPT_FRAME
   10.51 +        movq (%rsp),%rcx
   10.52 +        movq 8(%rsp),%r11
   10.53 +        addq $0x10,%rsp /* rsp points to the error code */
   10.54 +	pushq %rax
   10.55 +#	CFI_ADJUST_CFA_OFFSET 8
   10.56 +	leaq  \sym(%rip),%rax
   10.57 +	jmp error_entry
   10.58 +#	CFI_ENDPROC
   10.59 +.endm
   10.60  
   10.61  #define XEN_GET_VCPU_INFO(reg)	movq HYPERVISOR_shared_info,reg
   10.62  #define XEN_PUT_VCPU_INFO(reg)
   10.63 @@ -319,159 +295,84 @@ ENTRY(failsafe_callback)
   10.64          popq  %r11
   10.65          iretq
   10.66  
   10.67 -error_code:
   10.68 -        SAVE_ALL
   10.69 -        movq  %rsp,%rdi
   10.70 -        movl  15*8+4(%rsp),%eax
   10.71 -        leaq  exception_table(%rip),%rdx
   10.72 -        callq *(%rdx,%rax,8)
   10.73 -        RESTORE_ALL
   10.74 -        addq  $8,%rsp
   10.75 -        iretq
   10.76 -                        
   10.77 -ENTRY(divide_error)
   10.78 -        popq  %rcx
   10.79 -        popq  %r11
   10.80 -	pushq $0
   10.81 -        movl  $TRAP_divide_error,4(%rsp)
   10.82 -        jmp   error_code
   10.83 -        
   10.84 +
   10.85  ENTRY(coprocessor_error)
   10.86 -        popq  %rcx
   10.87 -        popq  %r11
   10.88 -	pushq $0
   10.89 -        movl  $TRAP_copro_error,4(%rsp)
   10.90 -        jmp   error_code
   10.91 +        zeroentry do_coprocessor_error
   10.92 +
   10.93  
   10.94  ENTRY(simd_coprocessor_error)
   10.95 -        popq  %rcx
   10.96 -        popq  %r11
   10.97 -	pushq $0
   10.98 -        movl  $TRAP_simd_error,4(%rsp)
   10.99 -        jmp   error_code
  10.100 +        zeroentry do_simd_coprocessor_error
  10.101 +
  10.102  
  10.103  ENTRY(device_not_available)
  10.104 -        popq  %rcx
  10.105 -        popq  %r11
  10.106 -        movl  $TRAP_no_device,4(%rsp)
  10.107 -        jmp   error_code
  10.108 +        zeroentry do_device_not_available
  10.109 +
  10.110  
  10.111  ENTRY(debug)
  10.112 -        popq  %rcx
  10.113 -        popq  %r11
  10.114 -	pushq $0
  10.115 -        movl  $TRAP_debug,4(%rsp)
  10.116 -        jmp   error_code
  10.117 +#       INTR_FRAME
  10.118 +#       CFI_ADJUST_CFA_OFFSET 8 */
  10.119 +        zeroentry do_debug
  10.120 +#       CFI_ENDPROC
  10.121 +
  10.122  
  10.123  ENTRY(int3)
  10.124 -        popq  %rcx
  10.125 -        popq  %r11
  10.126 -	pushq $0
  10.127 -        movl  $TRAP_int3,4(%rsp)
  10.128 -        jmp   error_code
  10.129 +#       INTR_FRAME
  10.130 +#       CFI_ADJUST_CFA_OFFSET 8 */
  10.131 +        zeroentry do_int3
  10.132 +#       CFI_ENDPROC
  10.133  
  10.134  ENTRY(overflow)
  10.135 -        popq  %rcx
  10.136 -        popq  %r11
  10.137 -	pushq $0
  10.138 -        movl  $TRAP_overflow,4(%rsp)
  10.139 -        jmp   error_code
  10.140 +        zeroentry do_overflow
  10.141 +
  10.142  
  10.143  ENTRY(bounds)
  10.144 -        popq  %rcx
  10.145 -        popq  %r11
  10.146 -	pushq $0
  10.147 -        movl  $TRAP_bounds,4(%rsp)
  10.148 -        jmp   error_code
  10.149 +        zeroentry do_bounds
  10.150 +    
  10.151 +    
  10.152 +ENTRY(invalid_op)
  10.153 +        zeroentry do_invalid_op
  10.154  
  10.155 -ENTRY(invalid_op)
  10.156 -        popq  %rcx
  10.157 -        popq  %r11
  10.158 -	pushq $0
  10.159 -        movl  $TRAP_invalid_op,4(%rsp)
  10.160 -        jmp   error_code
  10.161  
  10.162  ENTRY(coprocessor_segment_overrun)
  10.163 -        popq  %rcx
  10.164 -        popq  %r11
  10.165 -	pushq $0
  10.166 -        movl  $TRAP_copro_seg,4(%rsp)
  10.167 -        jmp   error_code
  10.168 +        zeroentry do_coprocessor_segment_overrun
  10.169 +
  10.170  
  10.171  ENTRY(invalid_TSS)
  10.172 -        popq  %rcx
  10.173 -        popq  %r11
  10.174 -        movl  $TRAP_invalid_tss,4(%rsp)
  10.175 -        jmp   error_code
  10.176 +        errorentry do_invalid_TSS
  10.177 +
  10.178  
  10.179  ENTRY(segment_not_present)
  10.180 -        popq  %rcx
  10.181 -        popq  %r11
  10.182 -        movl  $TRAP_no_segment,4(%rsp)
  10.183 -        jmp   error_code
  10.184 +        errorentry do_segment_not_present
  10.185 +
  10.186  
  10.187 +/* runs on exception stack */
  10.188  ENTRY(stack_segment)
  10.189 -        popq  %rcx
  10.190 -        popq  %r11
  10.191 -        movl  $TRAP_stack_error,4(%rsp)
  10.192 -        jmp   error_code
  10.193 +#       XCPT_FRAME
  10.194 +        errorentry do_stack_segment
  10.195 +#       CFI_ENDPROC
  10.196 +                    
  10.197  
  10.198  ENTRY(general_protection)
  10.199 -        popq  %rcx
  10.200 -        popq  %r11
  10.201 -        movl  $TRAP_gp_fault,4(%rsp)
  10.202 -        jmp   error_code
  10.203 +        errorentry do_general_protection
  10.204 +
  10.205  
  10.206  ENTRY(alignment_check)
  10.207 -        popq  %rcx
  10.208 -        popq  %r11
  10.209 -        movl  $TRAP_alignment_check,4(%rsp)
  10.210 -        jmp   error_code
  10.211 +        errorentry do_alignment_check
  10.212 +
  10.213  
  10.214 -ENTRY(virt_cr2)
  10.215 -        .quad 0
  10.216 -ENTRY(page_fault)
  10.217 -        popq  %rcx
  10.218 -        popq  %r11
  10.219 -        popq  virt_cr2(%rip)
  10.220 -        movl  $TRAP_page_fault,4(%rsp)
  10.221 -        jmp   error_code
  10.222 -        
  10.223 -ENTRY(machine_check)
  10.224 -        popq  %rcx
  10.225 -        popq  %r11
  10.226 -	pushq $0
  10.227 -        movl  $TRAP_machine_check,4(%rsp)
  10.228 -        jmp   error_code
  10.229 +ENTRY(divide_error)
  10.230 +        zeroentry do_divide_error
  10.231 +
  10.232  
  10.233  ENTRY(spurious_interrupt_bug)
  10.234 -        popq  %rcx
  10.235 -        popq  %r11
  10.236 -	pushq $0
  10.237 -        movl  $TRAP_spurious_int,4(%rsp)
  10.238 -        jmp   error_code
  10.239 +        zeroentry do_spurious_interrupt_bug
  10.240 +            
  10.241  
  10.242 -ENTRY(exception_table)
  10.243 -        .quad do_divide_error
  10.244 -        .quad do_debug
  10.245 -        .quad 0 # nmi
  10.246 -        .quad do_int3
  10.247 -        .quad do_overflow
  10.248 -        .quad do_bounds
  10.249 -        .quad do_invalid_op
  10.250 -        .quad 0
  10.251 -        .quad 0
  10.252 -        .quad do_coprocessor_segment_overrun
  10.253 -        .quad do_invalid_TSS
  10.254 -        .quad do_segment_not_present
  10.255 -        .quad do_stack_segment
  10.256 -        .quad do_general_protection
  10.257 -        .quad do_page_fault
  10.258 -        .quad do_spurious_interrupt_bug
  10.259 -        .quad do_coprocessor_error
  10.260 -        .quad do_alignment_check
  10.261 -        .quad do_machine_check
  10.262 -        .quad do_simd_coprocessor_error
  10.263 +ENTRY(page_fault)
  10.264 +        errorentry do_page_fault
  10.265 +
  10.266 +
  10.267 +
  10.268  
  10.269  
  10.270  ENTRY(thread_starter)
    11.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen-mkbuildtree-pre	Tue May 02 18:17:59 2006 +0100
    11.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen-mkbuildtree-pre	Thu May 04 14:19:19 2006 +0100
    11.3 @@ -14,28 +14,12 @@ function try_to_mv() {
    11.4  	fi
    11.5  }
    11.6  
    11.7 -function try_to_mkdir() {
    11.8 -	if [ ! -e $2 ]
    11.9 -	then
   11.10 -		mv $1 $2
   11.11 -		mkdir $1
   11.12 -	fi
   11.13 -}
   11.14 -
   11.15 -try_to_mkdir mm mm.xen-x86
   11.16 -try_to_mv net net.xen-x86
   11.17 -try_to_mv kernel kernel.xen-x86
   11.18 -try_to_mv drivers/acpi/tables.c drivers/acpi/tables.c.xen-x86
   11.19 -#try_to_mv arch/xen/kernel drivers/xen/core
   11.20 -#try_to_mkdir arch/xen arch/xen.xen-x86
   11.21 -#try_to_mv arch/xen.xen-x86/configs arch/xen
   11.22 -#try_to_mv include/asm-generic include/asm-generic.xen-x86
   11.23 -try_to_mkdir include/linux include/linux.xen-x86
   11.24 +try_to_mv mm/Kconfig mm/Kconfig.xen-x86
   11.25  
   11.26  # need to grab a couple of xen-modified files for generic_page_range and
   11.27  # typedef pte_fn_t which are used by driver/xen blkif
   11.28 -ln -sf ../mm.xen-x86/memory.c mm/
   11.29 -ln -sf ../linux.xen-x86/mm.h include/linux/
   11.30 +#ln -sf ../mm.xen-x86/memory.c mm/
   11.31 +#ln -sf ../linux.xen-x86/mm.h include/linux/
   11.32  
   11.33  #eventually asm-xsi-offsets needs to be part of hypervisor.h/hypercall.h
   11.34  ln -sf ../../../../xen/include/asm-ia64/asm-xsi-offsets.h include/asm-ia64/xen/
    12.1 --- a/linux-2.6-xen-sparse/drivers/char/tpm/Kconfig	Tue May 02 18:17:59 2006 +0100
    12.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/Kconfig	Thu May 04 14:19:19 2006 +0100
    12.3 @@ -51,7 +51,7 @@ config TCG_INFINEON
    12.4  
    12.5  config TCG_XEN
    12.6  	tristate "XEN TPM Interface"
    12.7 -	depends on TCG_TPM && XEN && XEN_TPMDEV_FRONTEND
    12.8 +	depends on TCG_TPM && XEN
    12.9  	---help---
   12.10  	  If you want to make TPM support available to a Xen
   12.11  	  user domain, say Yes and it will
   12.12 @@ -60,4 +60,3 @@ config TCG_XEN
   12.13            tpm_xen.
   12.14  
   12.15  endmenu
   12.16 -
    13.1 --- a/linux-2.6-xen-sparse/drivers/char/tpm/Makefile	Tue May 02 18:17:59 2006 +0100
    13.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/Makefile	Thu May 04 14:19:19 2006 +0100
    13.3 @@ -8,4 +8,4 @@ endif
    13.4  obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
    13.5  obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
    13.6  obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
    13.7 -obj-$(CONFIG_TCG_XEN) += tpm_xen.o
    13.8 +obj-$(CONFIG_TCG_XEN) += tpm_xen.o tpm_vtpm.o
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_vtpm.c	Thu May 04 14:19:19 2006 +0100
    14.3 @@ -0,0 +1,546 @@
    14.4 +/*
    14.5 + * Copyright (C) 2006 IBM Corporation
    14.6 + *
    14.7 + * Authors:
    14.8 + * Stefan Berger <stefanb@us.ibm.com>
    14.9 + *
   14.10 + * Generic device driver part for device drivers in a virtualized
   14.11 + * environment.
   14.12 + *
   14.13 + * This program is free software; you can redistribute it and/or
   14.14 + * modify it under the terms of the GNU General Public License as
   14.15 + * published by the Free Software Foundation, version 2 of the
   14.16 + * License.
   14.17 + *
   14.18 + */
   14.19 +
   14.20 +#include <asm/uaccess.h>
   14.21 +#include <linux/list.h>
   14.22 +#include <linux/device.h>
   14.23 +#include <linux/interrupt.h>
   14.24 +#include <linux/platform_device.h>
   14.25 +#include "tpm.h"
   14.26 +#include "tpm_vtpm.h"
   14.27 +
   14.28 +/* read status bits */
   14.29 +enum {
   14.30 +	STATUS_BUSY = 0x01,
   14.31 +	STATUS_DATA_AVAIL = 0x02,
   14.32 +	STATUS_READY = 0x04
   14.33 +};
   14.34 +
   14.35 +#define MIN(x,y)  ((x) < (y)) ? (x) : (y)
   14.36 +
   14.37 +struct transmission {
   14.38 +	struct list_head next;
   14.39 +
   14.40 +	unsigned char *request;
   14.41 +	size_t  request_len;
   14.42 +	size_t  request_buflen;
   14.43 +
   14.44 +	unsigned char *response;
   14.45 +	size_t  response_len;
   14.46 +	size_t  response_buflen;
   14.47 +
   14.48 +	unsigned int flags;
   14.49 +};
   14.50 +
   14.51 +enum {
   14.52 +	TRANSMISSION_FLAG_WAS_QUEUED = 0x1
   14.53 +};
   14.54 +
   14.55 +struct vtpm_state {
   14.56 +	struct transmission *current_request;
   14.57 +	spinlock_t           req_list_lock;
   14.58 +	wait_queue_head_t    req_wait_queue;
   14.59 +
   14.60 +	struct list_head     queued_requests;
   14.61 +
   14.62 +	struct transmission *current_response;
   14.63 +	spinlock_t           resp_list_lock;
   14.64 +	wait_queue_head_t    resp_wait_queue;     // processes waiting for responses
   14.65 +
   14.66 +	struct transmission *req_cancelled;       // if a cancellation was encounterd
   14.67 +
   14.68 +	u8                   vd_status;
   14.69 +	u8                   flags;
   14.70 +
   14.71 +	unsigned long        disconnect_time;
   14.72 +
   14.73 +	struct tpm_virtual_device *tpmvd;
   14.74 +};
   14.75 +
   14.76 +enum {
   14.77 +	DATAEX_FLAG_QUEUED_ONLY = 0x1
   14.78 +};
   14.79 +
   14.80 +
   14.81 +/* local variables */
   14.82 +static struct vtpm_state *vtpms;
   14.83 +
   14.84 +/* local function prototypes */
   14.85 +static int _vtpm_send_queued(struct tpm_chip *chip);
   14.86 +
   14.87 +
   14.88 +/* =============================================================
   14.89 + * Some utility functions
   14.90 + * =============================================================
   14.91 + */
   14.92 +static void vtpm_state_init(struct vtpm_state *vtpms)
   14.93 +{
   14.94 +	vtpms->current_request = NULL;
   14.95 +	spin_lock_init(&vtpms->req_list_lock);
   14.96 +	init_waitqueue_head(&vtpms->req_wait_queue);
   14.97 +	INIT_LIST_HEAD(&vtpms->queued_requests);
   14.98 +
   14.99 +	vtpms->current_response = NULL;
  14.100 +	spin_lock_init(&vtpms->resp_list_lock);
  14.101 +	init_waitqueue_head(&vtpms->resp_wait_queue);
  14.102 +
  14.103 +	vtpms->disconnect_time = jiffies;
  14.104 +}
  14.105 +
  14.106 +
  14.107 +static inline struct transmission *transmission_alloc(void)
  14.108 +{
  14.109 +	return kzalloc(sizeof(struct transmission), GFP_ATOMIC);
  14.110 +}
  14.111 +
  14.112 +static unsigned char *
  14.113 +transmission_set_req_buffer(struct transmission *t,
  14.114 +                            unsigned char *buffer, size_t len)
  14.115 +{
  14.116 +	if (t->request_buflen < len) {
  14.117 +		kfree(t->request);
  14.118 +		t->request = kmalloc(len, GFP_KERNEL);
  14.119 +		if (!t->request) {
  14.120 +			t->request_buflen = 0;
  14.121 +			return NULL;
  14.122 +		}
  14.123 +		t->request_buflen = len;
  14.124 +	}
  14.125 +
  14.126 +	memcpy(t->request, buffer, len);
  14.127 +	t->request_len = len;
  14.128 +
  14.129 +	return t->request;
  14.130 +}
  14.131 +
  14.132 +static unsigned char *
  14.133 +transmission_set_res_buffer(struct transmission *t,
  14.134 +                            const unsigned char *buffer, size_t len)
  14.135 +{
  14.136 +	if (t->response_buflen < len) {
  14.137 +		kfree(t->response);
  14.138 +		t->response = kmalloc(len, GFP_ATOMIC);
  14.139 +		if (!t->response) {
  14.140 +			t->response_buflen = 0;
  14.141 +			return NULL;
  14.142 +		}
  14.143 +		t->response_buflen = len;
  14.144 +	}
  14.145 +
  14.146 +	memcpy(t->response, buffer, len);
  14.147 +	t->response_len = len;
  14.148 +
  14.149 +	return t->response;
  14.150 +}
  14.151 +
  14.152 +static inline void transmission_free(struct transmission *t)
  14.153 +{
  14.154 +	kfree(t->request);
  14.155 +	kfree(t->response);
  14.156 +	kfree(t);
  14.157 +}
  14.158 +
  14.159 +/* =============================================================
  14.160 + * Interface with the lower layer driver
  14.161 + * =============================================================
  14.162 + */
  14.163 +/*
  14.164 + * Lower layer uses this function to make a response available.
  14.165 + */
  14.166 +int vtpm_vd_recv(const unsigned char *buffer, size_t count, const void *ptr)
  14.167 +{
  14.168 +	unsigned long flags;
  14.169 +	int ret_size = 0;
  14.170 +	struct transmission *t;
  14.171 +
  14.172 +	/*
  14.173 +	 * The list with requests must contain one request
  14.174 +	 * only and the element there must be the one that
  14.175 +	 * was passed to me from the front-end.
  14.176 +	 */
  14.177 +	spin_lock_irqsave(&vtpms->resp_list_lock, flags);
  14.178 +	if (vtpms->current_request != ptr) {
  14.179 +		printk("WARNING: The request pointer is different than the "
  14.180 +		       "pointer the shared memory driver returned to me. "
  14.181 +		       "%p != %p\n",
  14.182 +		       vtpms->current_request, ptr);
  14.183 +	}
  14.184 +
  14.185 +	/*
  14.186 +	 * If the request has been cancelled, just quit here
  14.187 +	 */
  14.188 +	if (vtpms->req_cancelled == (struct transmission *)ptr) {
  14.189 +		if (vtpms->current_request == vtpms->req_cancelled) {
  14.190 +			vtpms->current_request = NULL;
  14.191 +		}
  14.192 +		transmission_free(vtpms->req_cancelled);
  14.193 +		vtpms->req_cancelled = NULL;
  14.194 +		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
  14.195 +		return 0;
  14.196 +	}
  14.197 +
  14.198 +	if (NULL != (t = vtpms->current_request)) {
  14.199 +		transmission_free(t);
  14.200 +		vtpms->current_request = NULL;
  14.201 +	}
  14.202 +
  14.203 +	t = transmission_alloc();
  14.204 +	if (t) {
  14.205 +		if (!transmission_set_res_buffer(t, buffer, count)) {
  14.206 +			transmission_free(t);
  14.207 +			spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
  14.208 +			return -ENOMEM;
  14.209 +		}
  14.210 +		ret_size = count;
  14.211 +		vtpms->current_response = t;
  14.212 +		wake_up_interruptible(&vtpms->resp_wait_queue);
  14.213 +	}
  14.214 +	spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
  14.215 +
  14.216 +	return ret_size;
  14.217 +}
  14.218 +
  14.219 +
  14.220 +/*
  14.221 + * Lower layer indicates its status (connected/disconnected)
  14.222 + */
  14.223 +void vtpm_vd_status(u8 vd_status)
  14.224 +{
  14.225 +	vtpms->vd_status = vd_status;
  14.226 +	if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
  14.227 +		vtpms->disconnect_time = jiffies;
  14.228 +	}
  14.229 +}
  14.230 +
  14.231 +/* =============================================================
  14.232 + * Interface with the generic TPM driver
  14.233 + * =============================================================
  14.234 + */
  14.235 +static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
  14.236 +{
  14.237 +	int rc = 0;
  14.238 +	unsigned long flags;
  14.239 +
  14.240 +	/*
  14.241 +	 * Check if the previous operation only queued the command
  14.242 +	 * In this case there won't be a response, so I just
  14.243 +	 * return from here and reset that flag. In any other
  14.244 +	 * case I should receive a response from the back-end.
  14.245 +	 */
  14.246 +	spin_lock_irqsave(&vtpms->resp_list_lock, flags);
  14.247 +	if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
  14.248 +		vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
  14.249 +		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
  14.250 +		/*
  14.251 +		 * The first few commands (measurements) must be
  14.252 +		 * queued since it might not be possible to talk to the
  14.253 +		 * TPM, yet.
  14.254 +		 * Return a response of up to 30 '0's.
  14.255 +		 */
  14.256 +
  14.257 +		count = MIN(count, 30);
  14.258 +		memset(buf, 0x0, count);
  14.259 +		return count;
  14.260 +	}
  14.261 +	/*
  14.262 +	 * Check whether something is in the responselist and if
  14.263 +	 * there's nothing in the list wait for something to appear.
  14.264 +	 */
  14.265 +
  14.266 +	if (!vtpms->current_response) {
  14.267 +		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
  14.268 +		interruptible_sleep_on_timeout(&vtpms->resp_wait_queue,
  14.269 +		                               1000);
  14.270 +		spin_lock_irqsave(&vtpms->resp_list_lock ,flags);
  14.271 +	}
  14.272 +
  14.273 +	if (vtpms->current_response) {
  14.274 +		struct transmission *t = vtpms->current_response;
  14.275 +		vtpms->current_response = NULL;
  14.276 +		rc = MIN(count, t->response_len);
  14.277 +		memcpy(buf, t->response, rc);
  14.278 +		transmission_free(t);
  14.279 +	}
  14.280 +
  14.281 +	spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
  14.282 +	return rc;
  14.283 +}
  14.284 +
  14.285 +static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
  14.286 +{
  14.287 +	int rc = 0;
  14.288 +	unsigned long flags;
  14.289 +	struct transmission *t = transmission_alloc();
  14.290 +
  14.291 +	if (!t)
  14.292 +		return -ENOMEM;
  14.293 +	/*
  14.294 +	 * If there's a current request, it must be the
  14.295 +	 * previous request that has timed out.
  14.296 +	 */
  14.297 +	spin_lock_irqsave(&vtpms->req_list_lock, flags);
  14.298 +	if (vtpms->current_request != NULL) {
  14.299 +		printk("WARNING: Sending although there is a request outstanding.\n"
  14.300 +		       "         Previous request must have timed out.\n");
  14.301 +		transmission_free(vtpms->current_request);
  14.302 +		vtpms->current_request = NULL;
  14.303 +	}
  14.304 +	spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
  14.305 +
  14.306 +	/*
  14.307 +	 * Queue the packet if the driver below is not
  14.308 +	 * ready, yet, or there is any packet already
  14.309 +	 * in the queue.
  14.310 +	 * If the driver below is ready, unqueue all
  14.311 +	 * packets first before sending our current
  14.312 +	 * packet.
  14.313 +	 * For each unqueued packet, except for the
  14.314 +	 * last (=current) packet, call the function
  14.315 +	 * tpm_xen_recv to wait for the response to come
  14.316 +	 * back.
  14.317 +	 */
  14.318 +	if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
  14.319 +		if (time_after(jiffies,
  14.320 +		               vtpms->disconnect_time + HZ * 10)) {
  14.321 +			rc = -ENOENT;
  14.322 +		} else {
  14.323 +			goto queue_it;
  14.324 +		}
  14.325 +	} else {
  14.326 +		/*
  14.327 +		 * Send all queued packets.
  14.328 +		 */
  14.329 +		if (_vtpm_send_queued(chip) == 0) {
  14.330 +
  14.331 +			vtpms->current_request = t;
  14.332 +
  14.333 +			rc = vtpm_vd_send(chip,
  14.334 +			                  vtpms->tpmvd->tpm_private,
  14.335 +			                  buf,
  14.336 +			                  count,
  14.337 +			                  t);
  14.338 +			/*
  14.339 +			 * The generic TPM driver will call
  14.340 +			 * the function to receive the response.
  14.341 +			 */
  14.342 +			if (rc < 0) {
  14.343 +				vtpms->current_request = NULL;
  14.344 +				goto queue_it;
  14.345 +			}
  14.346 +		} else {
  14.347 +queue_it:
  14.348 +			if (!transmission_set_req_buffer(t, buf, count)) {
  14.349 +				transmission_free(t);
  14.350 +				rc = -ENOMEM;
  14.351 +				goto exit;
  14.352 +			}
  14.353 +			/*
  14.354 +	 * An error occurred. Don't even try
  14.355 +			 * to send the current request. Just
  14.356 +			 * queue it.
  14.357 +			 */
  14.358 +			spin_lock_irqsave(&vtpms->req_list_lock, flags);
  14.359 +			vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY;
  14.360 +			list_add_tail(&t->next, &vtpms->queued_requests);
  14.361 +			spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
  14.362 +		}
  14.363 +	}
  14.364 +
  14.365 +exit:
  14.366 +	return rc;
  14.367 +}
  14.368 +
  14.369 +
  14.370 +/*
  14.371 + * Send all queued requests.
  14.372 + */
  14.373 +static int _vtpm_send_queued(struct tpm_chip *chip)
  14.374 +{
  14.375 +	int rc;
  14.376 +	int error = 0;
  14.377 +	long flags;
  14.378 +	unsigned char buffer[1];
  14.379 +
  14.380 +	spin_lock_irqsave(&vtpms->req_list_lock, flags);
  14.381 +
  14.382 +	while (!list_empty(&vtpms->queued_requests)) {
  14.383 +		/*
  14.384 +		 * Need to dequeue them.
  14.385 +		 * Read the result into a dummy buffer.
  14.386 +		 */
  14.387 +		struct transmission *qt = (struct transmission *)
  14.388 +		                          vtpms->queued_requests.next;
  14.389 +		list_del(&qt->next);
  14.390 +		vtpms->current_request = qt;
  14.391 +		spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
  14.392 +
  14.393 +		rc = vtpm_vd_send(chip,
  14.394 +		                  vtpms->tpmvd->tpm_private,
  14.395 +		                  qt->request,
  14.396 +		                  qt->request_len,
  14.397 +		                  qt);
  14.398 +
  14.399 +		if (rc < 0) {
  14.400 +			spin_lock_irqsave(&vtpms->req_list_lock, flags);
  14.401 +			if ((qt = vtpms->current_request) != NULL) {
  14.402 +				/*
  14.403 +				 * requeue it at the beginning
  14.404 +				 * of the list
  14.405 +				 */
  14.406 +				list_add(&qt->next,
  14.407 +				         &vtpms->queued_requests);
  14.408 +			}
  14.409 +			vtpms->current_request = NULL;
  14.410 +			error = 1;
  14.411 +			break;
  14.412 +		}
  14.413 +		/*
  14.414 +		 * After this point qt is not valid anymore!
  14.415 +		 * It is freed when the front-end is delivering
  14.416 +		 * the data by calling tpm_recv
  14.417 +		 */
  14.418 +		/*
  14.419 +		 * Receive response into provided dummy buffer
  14.420 +		 */
  14.421 +		rc = vtpm_recv(chip, buffer, sizeof(buffer));
  14.422 +		spin_lock_irqsave(&vtpms->req_list_lock, flags);
  14.423 +	}
  14.424 +
  14.425 +	spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
  14.426 +
  14.427 +	return error;
  14.428 +}
  14.429 +
  14.430 +static void vtpm_cancel(struct tpm_chip *chip)
  14.431 +{
  14.432 +	unsigned long flags;
  14.433 +	spin_lock_irqsave(&vtpms->resp_list_lock,flags);
  14.434 +
  14.435 +	vtpms->req_cancelled = vtpms->current_request;
  14.436 +
  14.437 +	spin_unlock_irqrestore(&vtpms->resp_list_lock,flags);
  14.438 +}
  14.439 +
  14.440 +static u8 vtpm_status(struct tpm_chip *chip)
  14.441 +{
  14.442 +	u8 rc = 0;
  14.443 +	unsigned long flags;
  14.444 +
  14.445 +	spin_lock_irqsave(&vtpms->resp_list_lock, flags);
  14.446 +	/*
  14.447 +	 * Data are available if:
  14.448 +	 *  - there's a current response
  14.449 +	 *  - the last packet was queued only (this is fake, but necessary to
  14.450 +	 *      get the generic TPM layer to call the receive function.)
  14.451 +	 */
  14.452 +	if (vtpms->current_response ||
  14.453 +	    0 != (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY)) {
  14.454 +		rc = STATUS_DATA_AVAIL;
  14.455 +	}
  14.456 +	spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
  14.457 +	return rc;
  14.458 +}
  14.459 +
  14.460 +static struct file_operations vtpm_ops = {
  14.461 +	.owner = THIS_MODULE,
  14.462 +	.llseek = no_llseek,
  14.463 +	.open = tpm_open,
  14.464 +	.read = tpm_read,
  14.465 +	.write = tpm_write,
  14.466 +	.release = tpm_release,
  14.467 +};
  14.468 +
  14.469 +static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
  14.470 +static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
  14.471 +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
  14.472 +static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
  14.473 +
  14.474 +static struct attribute *vtpm_attrs[] = {
  14.475 +	&dev_attr_pubek.attr,
  14.476 +	&dev_attr_pcrs.attr,
  14.477 +	&dev_attr_caps.attr,
  14.478 +	&dev_attr_cancel.attr,
  14.479 +	NULL,
  14.480 +};
  14.481 +
  14.482 +static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs };
  14.483 +
  14.484 +static struct tpm_vendor_specific tpm_vtpm = {
  14.485 +	.recv = vtpm_recv,
  14.486 +	.send = vtpm_send,
  14.487 +	.cancel = vtpm_cancel,
  14.488 +	.status = vtpm_status,
  14.489 +	.req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
  14.490 +	.req_complete_val  = STATUS_DATA_AVAIL,
  14.491 +	.req_canceled = STATUS_READY,
  14.492 +	.base = 0,
  14.493 +	.attr_group = &vtpm_attr_grp,
  14.494 +	.miscdev = {
  14.495 +		.fops = &vtpm_ops,
  14.496 +	},
  14.497 +};
  14.498 +
  14.499 +static struct platform_device *pdev;
  14.500 +
  14.501 +int __init init_vtpm(struct tpm_virtual_device *tvd)
  14.502 +{
  14.503 +	int rc;
  14.504 +
  14.505 +	/* vtpms is global - only allow one user */
  14.506 +	if (vtpms)
  14.507 +		return -EBUSY;
  14.508 +
  14.509 +	vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL);
  14.510 +	if (!vtpms)
  14.511 +		return -ENOMEM;
  14.512 +
  14.513 +	vtpm_state_init(vtpms);
  14.514 +	vtpms->tpmvd = tvd;
  14.515 +
  14.516 +	pdev = platform_device_register_simple("tpm_vtpm", -1, NULL, 0);
  14.517 +	if (IS_ERR(pdev)) {
  14.518 +		rc = PTR_ERR(pdev);
  14.519 +		goto err_free_mem;
  14.520 +	}
  14.521 +
  14.522 +	if (tvd)
  14.523 +		tpm_vtpm.buffersize = tvd->max_tx_size;
  14.524 +
  14.525 +	if ((rc = tpm_register_hardware(&pdev->dev, &tpm_vtpm)) < 0) {
  14.526 +		goto err_unreg_pdev;
  14.527 +	}
  14.528 +
  14.529 +	return 0;
  14.530 +
  14.531 +err_unreg_pdev:
  14.532 +	platform_device_unregister(pdev);
  14.533 +err_free_mem:
  14.534 +	kfree(vtpms);
  14.535 +	vtpms = NULL;
  14.536 +
  14.537 +	return rc;
  14.538 +}
  14.539 +
  14.540 +void __exit cleanup_vtpm(void)
  14.541 +{
  14.542 +	struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
  14.543 +	if (chip) {
  14.544 +		tpm_remove_hardware(chip->dev);
  14.545 +		platform_device_unregister(pdev);
  14.546 +	}
  14.547 +	kfree(vtpms);
  14.548 +	vtpms = NULL;
  14.549 +}
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_vtpm.h	Thu May 04 14:19:19 2006 +0100
    15.3 @@ -0,0 +1,38 @@
    15.4 +#ifndef TPM_VTPM_H
    15.5 +#define TPM_VTPM_H
    15.6 +
    15.7 +struct tpm_chip;
    15.8 +struct tpm_private;
    15.9 +
   15.10 +struct tpm_virtual_device {
   15.11 +	/*
   15.12 +	 * This field indicates the maximum size the driver can
   15.13 +	 * transfer in one chunk. It is filled in by the front-end
   15.14 +	 * driver and should be propagated to the generic tpm driver
   15.15 +	 * for allocation of buffers.
   15.16 +	 */
   15.17 +	unsigned int max_tx_size;
   15.18 +	/*
   15.19 +	 * The following is a private structure of the underlying
   15.20 +	 * driver. It is passed as parameter in the send function.
   15.21 +	 */
   15.22 +	struct tpm_private *tpm_private;
   15.23 +};
   15.24 +
   15.25 +enum vdev_status {
   15.26 +	TPM_VD_STATUS_DISCONNECTED = 0x0,
   15.27 +	TPM_VD_STATUS_CONNECTED = 0x1
   15.28 +};
   15.29 +
   15.30 +/* this function is called from tpm_vtpm.c */
   15.31 +int vtpm_vd_send(struct tpm_chip *tc,
   15.32 +                 struct tpm_private * tp,
   15.33 +                 const u8 * buf, size_t count, void *ptr);
   15.34 +
   15.35 +/* these functions are offered by tpm_vtpm.c */
   15.36 +int __init init_vtpm(struct tpm_virtual_device *);
   15.37 +void __exit cleanup_vtpm(void);
   15.38 +int vtpm_vd_recv(const unsigned char *buffer, size_t count, const void *ptr);
   15.39 +void vtpm_vd_status(u8 status);
   15.40 +
   15.41 +#endif
    16.1 --- a/linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c	Tue May 02 18:17:59 2006 +0100
    16.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c	Thu May 04 14:19:19 2006 +0100
    16.3 @@ -1,536 +1,767 @@
    16.4  /*
    16.5 - * Copyright (C) 2004 IBM Corporation
    16.6 + * Copyright (c) 2005, IBM Corporation
    16.7   *
    16.8 - * Authors:
    16.9 - * Leendert van Doorn <leendert@watson.ibm.com>
   16.10 - * Dave Safford <safford@watson.ibm.com>
   16.11 - * Reiner Sailer <sailer@watson.ibm.com>
   16.12 - * Kylene Hall <kjhall@us.ibm.com>
   16.13 - * Stefan Berger <stefanb@us.ibm.com>
   16.14 + * Author: Stefan Berger, stefanb@us.ibm.com
   16.15 + * Grant table support: Mahadevan Gomathisankaran
   16.16   *
   16.17 - * Maintained by: <tpmdd_devel@lists.sourceforge.net>
   16.18 + * This code has been derived from drivers/xen/netfront/netfront.c
   16.19   *
   16.20 - * Device driver for TCG/TCPA TPM (trusted platform module) for XEN.
   16.21 - * Specifications at www.trustedcomputinggroup.org
   16.22 + * Copyright (c) 2002-2004, K A Fraser
   16.23   *
   16.24   * This program is free software; you can redistribute it and/or
   16.25 - * modify it under the terms of the GNU General Public License as
   16.26 - * published by the Free Software Foundation, version 2 of the
   16.27 - * License.
   16.28 + * modify it under the terms of the GNU General Public License version 2
   16.29 + * as published by the Free Software Foundation; or, when distributed
   16.30 + * separately from the Linux kernel or incorporated into other
   16.31 + * software packages, subject to the following license:
   16.32   *
   16.33 + * Permission is hereby granted, free of charge, to any person obtaining a copy
   16.34 + * of this source file (the "Software"), to deal in the Software without
   16.35 + * restriction, including without limitation the rights to use, copy, modify,
   16.36 + * merge, publish, distribute, sublicense, and/or sell copies of the Software,
   16.37 + * and to permit persons to whom the Software is furnished to do so, subject to
   16.38 + * the following conditions:
   16.39 + *
   16.40 + * The above copyright notice and this permission notice shall be included in
   16.41 + * all copies or substantial portions of the Software.
   16.42 + *
   16.43 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   16.44 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   16.45 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   16.46 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   16.47 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   16.48 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
   16.49 + * IN THE SOFTWARE.
   16.50   */
   16.51  
   16.52 -#include <asm/uaccess.h>
   16.53 -#include <linux/list.h>
   16.54 -#include <xen/tpmfe.h>
   16.55 -#include <linux/device.h>
   16.56 +#include <linux/errno.h>
   16.57  #include <linux/interrupt.h>
   16.58 -#include <linux/platform_device.h>
   16.59 -#include "tpm.h"
   16.60 +#include <linux/mutex.h>
   16.61 +#include <asm/uaccess.h>
   16.62 +#include <xen/evtchn.h>
   16.63 +#include <xen/interface/grant_table.h>
   16.64 +#include <xen/interface/io/tpmif.h>
   16.65 +#include <xen/xenbus.h>
   16.66 +#include "tpm_vtpm.h"
   16.67 +
   16.68 +#undef DEBUG
   16.69  
   16.70 -/* read status bits */
   16.71 -enum {
   16.72 -	STATUS_BUSY = 0x01,
   16.73 -	STATUS_DATA_AVAIL = 0x02,
   16.74 -	STATUS_READY = 0x04
   16.75 +/* local structures */
   16.76 +struct tpm_private {
   16.77 +	tpmif_tx_interface_t *tx;
   16.78 +	atomic_t refcnt;
   16.79 +	unsigned int evtchn;
   16.80 +	unsigned int irq;
   16.81 +	u8 is_connected;
   16.82 +	u8 is_suspended;
   16.83 +
   16.84 +	spinlock_t tx_lock;
   16.85 +
   16.86 +	struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
   16.87 +
   16.88 +	atomic_t tx_busy;
   16.89 +	void *tx_remember;
   16.90 +	domid_t backend_id;
   16.91 +	wait_queue_head_t wait_q;
   16.92 +
   16.93 +	struct xenbus_device *dev;
   16.94 +	int ring_ref;
   16.95  };
   16.96  
   16.97 -#define MIN(x,y)  ((x) < (y)) ? (x) : (y)
   16.98 -
   16.99 -struct transmission {
  16.100 -	struct list_head next;
  16.101 -	unsigned char *request;
  16.102 -	unsigned int request_len;
  16.103 -	unsigned char *rcv_buffer;
  16.104 -	unsigned int  buffersize;
  16.105 -	unsigned int flags;
  16.106 -};
  16.107 -
  16.108 -enum {
  16.109 -	TRANSMISSION_FLAG_WAS_QUEUED = 0x1
  16.110 +struct tx_buffer {
  16.111 +	unsigned int size;	// available space in data
  16.112 +	unsigned int len;	// used space in data
  16.113 +	unsigned char *data;	// pointer to a page
  16.114  };
  16.115  
  16.116 -struct data_exchange {
  16.117 -	struct transmission *current_request;
  16.118 -	spinlock_t           req_list_lock;
  16.119 -	wait_queue_head_t    req_wait_queue;
  16.120 -
  16.121 -	struct list_head     queued_requests;
  16.122 -
  16.123 -	struct transmission *current_response;
  16.124 -	spinlock_t           resp_list_lock;
  16.125 -	wait_queue_head_t    resp_wait_queue;     // processes waiting for responses
  16.126 -
  16.127 -	struct transmission *req_cancelled;       // if a cancellation was encounterd
  16.128 -
  16.129 -	unsigned int         fe_status;
  16.130 -	unsigned int         flags;
  16.131 -};
  16.132 -
  16.133 -enum {
  16.134 -	DATAEX_FLAG_QUEUED_ONLY = 0x1
  16.135 -};
  16.136 -
  16.137 -static struct data_exchange dataex;
  16.138 -
  16.139 -static unsigned long disconnect_time;
  16.140 -
  16.141 -static struct tpmfe_device tpmfe;
  16.142 -
  16.143 -/* local function prototypes */
  16.144 -static void __exit cleanup_xen(void);
  16.145 -
  16.146  
  16.147 -/* =============================================================
  16.148 - * Some utility functions
  16.149 - * =============================================================
  16.150 - */
  16.151 -static inline struct transmission *
  16.152 -transmission_alloc(void)
  16.153 -{
  16.154 -	return kzalloc(sizeof(struct transmission), GFP_KERNEL);
  16.155 -}
  16.156 +/* locally visible variables */
  16.157 +static grant_ref_t gref_head;
  16.158 +static struct tpm_private *my_priv;
  16.159  
  16.160 -static inline unsigned char *
  16.161 -transmission_set_buffer(struct transmission *t,
  16.162 -                        unsigned char *buffer, unsigned int len)
  16.163 +/* local function prototypes */
  16.164 +static irqreturn_t tpmif_int(int irq,
  16.165 +                             void *tpm_priv,
  16.166 +                             struct pt_regs *ptregs);
  16.167 +static void tpmif_rx_action(unsigned long unused);
  16.168 +static int tpmif_connect(struct xenbus_device *dev,
  16.169 +                         struct tpm_private *tp,
  16.170 +                         domid_t domid);
  16.171 +static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
  16.172 +static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
  16.173 +static void tpmif_free_tx_buffers(struct tpm_private *tp);
  16.174 +static void tpmif_set_connected_state(struct tpm_private *tp,
  16.175 +                                      u8 newstate);
  16.176 +static int tpm_xmit(struct tpm_private *tp,
  16.177 +                    const u8 * buf, size_t count, int userbuffer,
  16.178 +                    void *remember);
  16.179 +static void destroy_tpmring(struct tpm_private *tp);
  16.180 +
  16.181 +#define DPRINTK(fmt, args...) \
  16.182 +    pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
  16.183 +#define IPRINTK(fmt, args...) \
  16.184 +    printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
  16.185 +#define WPRINTK(fmt, args...) \
  16.186 +    printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
  16.187 +
  16.188 +#define GRANT_INVALID_REF	0
  16.189 +
  16.190 +
  16.191 +static inline int
  16.192 +tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
  16.193 +               int isuserbuffer)
  16.194  {
  16.195 -	kfree(t->request);
  16.196 -	t->request = kmalloc(len, GFP_KERNEL);
  16.197 -	if (t->request) {
  16.198 -		memcpy(t->request,
  16.199 -		       buffer,
  16.200 -		       len);
  16.201 -		t->request_len = len;
  16.202 +	int copied = len;
  16.203 +
  16.204 +	if (len > txb->size) {
  16.205 +		copied = txb->size;
  16.206  	}
  16.207 -	return t->request;
  16.208 -}
  16.209 -
  16.210 -static inline void
  16.211 -transmission_free(struct transmission *t)
  16.212 -{
  16.213 -	kfree(t->request);
  16.214 -	kfree(t->rcv_buffer);
  16.215 -	kfree(t);
  16.216 +	if (isuserbuffer) {
  16.217 +		if (copy_from_user(txb->data, src, copied))
  16.218 +			return -EFAULT;
  16.219 +	} else {
  16.220 +		memcpy(txb->data, src, copied);
  16.221 +	}
  16.222 +	txb->len = len;
  16.223 +	return copied;
  16.224  }
  16.225  
  16.226 -/* =============================================================
  16.227 - * Interface with the TPM shared memory driver for XEN
  16.228 - * =============================================================
  16.229 - */
  16.230 -static int tpm_recv(const u8 *buffer, size_t count, const void *ptr)
  16.231 +static inline struct tx_buffer *tx_buffer_alloc(void)
  16.232  {
  16.233 -	int ret_size = 0;
  16.234 -	struct transmission *t;
  16.235 +	struct tx_buffer *txb = kzalloc(sizeof (struct tx_buffer),
  16.236 +					GFP_KERNEL);
  16.237  
  16.238 -	/*
  16.239 -	 * The list with requests must contain one request
  16.240 -	 * only and the element there must be the one that
  16.241 -	 * was passed to me from the front-end.
  16.242 -	 */
  16.243 -	if (dataex.current_request != ptr) {
  16.244 -		printk("WARNING: The request pointer is different than the "
  16.245 -		       "pointer the shared memory driver returned to me. "
  16.246 -		       "%p != %p\n",
  16.247 -		       dataex.current_request, ptr);
  16.248 -	}
  16.249 -
  16.250 -	/*
  16.251 -	 * If the request has been cancelled, just quit here
  16.252 -	 */
  16.253 -	if (dataex.req_cancelled == (struct transmission *)ptr) {
  16.254 -		if (dataex.current_request == dataex.req_cancelled) {
  16.255 -			dataex.current_request = NULL;
  16.256 +	if (txb) {
  16.257 +		txb->len = 0;
  16.258 +		txb->size = PAGE_SIZE;
  16.259 +		txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
  16.260 +		if (txb->data == NULL) {
  16.261 +			kfree(txb);
  16.262 +			txb = NULL;
  16.263  		}
  16.264 -		transmission_free(dataex.req_cancelled);
  16.265 -		dataex.req_cancelled = NULL;
  16.266 -		return 0;
  16.267  	}
  16.268 -
  16.269 -	if (NULL != (t = dataex.current_request)) {
  16.270 -		transmission_free(t);
  16.271 -		dataex.current_request = NULL;
  16.272 -	}
  16.273 -
  16.274 -	t = transmission_alloc();
  16.275 -	if (t) {
  16.276 -		unsigned long flags;
  16.277 -		t->rcv_buffer = kmalloc(count, GFP_KERNEL);
  16.278 -		if (! t->rcv_buffer) {
  16.279 -			transmission_free(t);
  16.280 -			return -ENOMEM;
  16.281 -		}
  16.282 -		t->buffersize = count;
  16.283 -		memcpy(t->rcv_buffer, buffer, count);
  16.284 -		ret_size = count;
  16.285 -
  16.286 -		spin_lock_irqsave(&dataex.resp_list_lock ,flags);
  16.287 -		dataex.current_response = t;
  16.288 -		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
  16.289 -		wake_up_interruptible(&dataex.resp_wait_queue);
  16.290 -	}
  16.291 -	return ret_size;
  16.292 +	return txb;
  16.293  }
  16.294  
  16.295  
  16.296 -static void tpm_fe_status(unsigned int flags)
  16.297 +static inline void tx_buffer_free(struct tx_buffer *txb)
  16.298  {
  16.299 -	dataex.fe_status = flags;
  16.300 -	if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
  16.301 -		disconnect_time = jiffies;
  16.302 +	if (txb) {
  16.303 +		free_page((long)txb->data);
  16.304 +		kfree(txb);
  16.305 +	}
  16.306 +}
  16.307 +
  16.308 +/**************************************************************
  16.309 + Utility function for the tpm_private structure
  16.310 +**************************************************************/
  16.311 +static inline void tpm_private_init(struct tpm_private *tp)
  16.312 +{
  16.313 +	spin_lock_init(&tp->tx_lock);
  16.314 +	init_waitqueue_head(&tp->wait_q);
  16.315 +	atomic_set(&tp->refcnt, 1);
  16.316 +}
  16.317 +
  16.318 +static inline void tpm_private_put(void)
  16.319 +{
  16.320 +	if ( atomic_dec_and_test(&my_priv->refcnt)) {
  16.321 +		tpmif_free_tx_buffers(my_priv);
  16.322 +		kfree(my_priv);
  16.323 +		my_priv = NULL;
  16.324  	}
  16.325  }
  16.326  
  16.327 -/* =============================================================
  16.328 - * Interface with the generic TPM driver
  16.329 - * =============================================================
  16.330 - */
  16.331 -static int tpm_xen_recv(struct tpm_chip *chip, u8 * buf, size_t count)
  16.332 -{
  16.333 -	unsigned long flags;
  16.334 -	int rc = 0;
  16.335 -
  16.336 -	spin_lock_irqsave(&dataex.resp_list_lock, flags);
  16.337 -	/*
  16.338 -	 * Check if the previous operation only queued the command
  16.339 -	 * In this case there won't be a response, so I just
  16.340 -	 * return from here and reset that flag. In any other
  16.341 -	 * case I should receive a response from the back-end.
  16.342 -	 */
  16.343 -	if ((dataex.flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
  16.344 -		dataex.flags &= ~DATAEX_FLAG_QUEUED_ONLY;
  16.345 -		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
  16.346 -		/*
  16.347 -		 * a little hack here. The first few measurements
  16.348 -		 * are queued since there's no way to talk to the
  16.349 -		 * TPM yet (due to slowness of the control channel)
  16.350 -		 * So we just make IMA happy by giving it 30 NULL
  16.351 -		 * bytes back where the most important part is
  16.352 -		 * that the result code is '0'.
  16.353 -		 */
  16.354 -
  16.355 -		count = MIN(count, 30);
  16.356 -		memset(buf, 0x0, count);
  16.357 -		return count;
  16.358 -	}
  16.359 -	/*
  16.360 -	 * Check whether something is in the responselist and if
  16.361 -	 * there's nothing in the list wait for something to appear.
  16.362 -	 */
  16.363 -
  16.364 -	if (NULL == dataex.current_response) {
  16.365 -		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
  16.366 -		interruptible_sleep_on_timeout(&dataex.resp_wait_queue,
  16.367 -		                               1000);
  16.368 -		spin_lock_irqsave(&dataex.resp_list_lock ,flags);
  16.369 -	}
  16.370 -
  16.371 -	if (NULL != dataex.current_response) {
  16.372 -		struct transmission *t = dataex.current_response;
  16.373 -		dataex.current_response = NULL;
  16.374 -		rc = MIN(count, t->buffersize);
  16.375 -		memcpy(buf, t->rcv_buffer, rc);
  16.376 -		transmission_free(t);
  16.377 -	}
  16.378 -
  16.379 -	spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
  16.380 -	return rc;
  16.381 -}
  16.382 -
  16.383 -static int tpm_xen_send(struct tpm_chip *chip, u8 * buf, size_t count)
  16.384 +static struct tpm_private *tpm_private_get(void)
  16.385  {
  16.386 -	/*
  16.387 -	 * We simply pass the packet onto the XEN shared
  16.388 -	 * memory driver.
  16.389 -	 */
  16.390 -	unsigned long flags;
  16.391 -	int rc;
  16.392 -	struct transmission *t = transmission_alloc();
  16.393 -
  16.394 -	spin_lock_irqsave(&dataex.req_list_lock, flags);
  16.395 -	/*
  16.396 -	 * If there's a current request, it must be the
  16.397 -	 * previous request that has timed out.
  16.398 -	 */
  16.399 -	if (dataex.current_request != NULL) {
  16.400 -		printk("WARNING: Sending although there is a request outstanding.\n"
  16.401 -		       "         Previous request must have timed out.\n");
  16.402 -		transmission_free(dataex.current_request);
  16.403 -		dataex.current_request = NULL;
  16.404 -	}
  16.405 -
  16.406 -	if (t != NULL) {
  16.407 -		unsigned int error = 0;
  16.408 -		/*
  16.409 -		 * Queue the packet if the driver below is not
  16.410 -		 * ready, yet, or there is any packet already
  16.411 -		 * in the queue.
  16.412 -		 * If the driver below is ready, unqueue all
  16.413 -		 * packets first before sending our current
  16.414 -		 * packet.
  16.415 -		 * For each unqueued packet, except for the
  16.416 -		 * last (=current) packet, call the function
  16.417 -		 * tpm_xen_recv to wait for the response to come
  16.418 -		 * back.
  16.419 -		 */
  16.420 -		if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
  16.421 -			if (time_after(jiffies, disconnect_time + HZ * 10)) {
  16.422 -				rc = -ENOENT;
  16.423 -			} else {
  16.424 -				/*
  16.425 -				 * copy the request into the buffer
  16.426 -				 */
  16.427 -				if (transmission_set_buffer(t, buf, count)
  16.428 -				    == NULL) {
  16.429 -					transmission_free(t);
  16.430 -					rc = -ENOMEM;
  16.431 -					goto exit;
  16.432 -				}
  16.433 -				dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
  16.434 -				list_add_tail(&t->next, &dataex.queued_requests);
  16.435 -				rc = 0;
  16.436 -			}
  16.437 -		} else {
  16.438 -			/*
  16.439 -			 * Check whether there are any packets in the queue
  16.440 -			 */
  16.441 -			while (!list_empty(&dataex.queued_requests)) {
  16.442 -				/*
  16.443 -				 * Need to dequeue them.
  16.444 -				 * Read the result into a dummy buffer.
  16.445 -				 */
  16.446 -				unsigned char buffer[1];
  16.447 -				struct transmission *qt = (struct transmission *) dataex.queued_requests.next;
  16.448 -				list_del(&qt->next);
  16.449 -				dataex.current_request = qt;
  16.450 -				spin_unlock_irqrestore(&dataex.req_list_lock,
  16.451 -				                       flags);
  16.452 -
  16.453 -				rc = tpm_fe_send(tpmfe.tpm_private,
  16.454 -				                 qt->request,
  16.455 -				                 qt->request_len,
  16.456 -				                 qt);
  16.457 -
  16.458 -				if (rc < 0) {
  16.459 -					spin_lock_irqsave(&dataex.req_list_lock, flags);
  16.460 -					if ((qt = dataex.current_request) != NULL) {
  16.461 -						/*
  16.462 -						 * requeue it at the beginning
  16.463 -						 * of the list
  16.464 -						 */
  16.465 -						list_add(&qt->next,
  16.466 -						         &dataex.queued_requests);
  16.467 -					}
  16.468 -					dataex.current_request = NULL;
  16.469 -					error = 1;
  16.470 -					break;
  16.471 -				}
  16.472 -				/*
  16.473 -				 * After this point qt is not valid anymore!
  16.474 -				 * It is freed when the front-end is delivering the data
  16.475 -				 * by calling tpm_recv
  16.476 -				 */
  16.477 -
  16.478 -				/*
  16.479 -				 * Try to receive the response now into the provided dummy
  16.480 -				 * buffer (I don't really care about this response since
  16.481 -				 * there is no receiver anymore for this response)
  16.482 -				 */
  16.483 -				rc = tpm_xen_recv(chip, buffer, sizeof(buffer));
  16.484 -
  16.485 -				spin_lock_irqsave(&dataex.req_list_lock, flags);
  16.486 -			}
  16.487 -
  16.488 -			if (error == 0) {
  16.489 -				/*
  16.490 -				 * Finally, send the current request.
  16.491 -				 */
  16.492 -				dataex.current_request = t;
  16.493 -				/*
  16.494 -				 * Call the shared memory driver
  16.495 -				 * Pass to it the buffer with the request, the
  16.496 -				 * amount of bytes in the request and
  16.497 -				 * a void * pointer (here: transmission structure)
  16.498 -				 */
  16.499 -				rc = tpm_fe_send(tpmfe.tpm_private,
  16.500 -				                 buf, count, t);
  16.501 -				/*
  16.502 -				 * The generic TPM driver will call
  16.503 -				 * the function to receive the response.
  16.504 -				 */
  16.505 -				if (rc < 0) {
  16.506 -					dataex.current_request = NULL;
  16.507 -					goto queue_it;
  16.508 -				}
  16.509 -			} else {
  16.510 -queue_it:
  16.511 -				if (transmission_set_buffer(t, buf, count) == NULL) {
  16.512 -					transmission_free(t);
  16.513 -					rc = -ENOMEM;
  16.514 -					goto exit;
  16.515 -				}
  16.516 -				/*
  16.517 -				 * An error occurred. Don't event try
  16.518 -				 * to send the current request. Just
  16.519 -				 * queue it.
  16.520 -				 */
  16.521 -				dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
  16.522 -				list_add_tail(&t->next,
  16.523 -				              &dataex.queued_requests);
  16.524 -				rc = 0;
  16.525 +	int err;
  16.526 +	if (!my_priv) {
  16.527 +		my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
  16.528 +		if (my_priv) {
  16.529 +			tpm_private_init(my_priv);
  16.530 +			err = tpmif_allocate_tx_buffers(my_priv);
  16.531 +			if (err < 0) {
  16.532 +				tpm_private_put();
  16.533  			}
  16.534  		}
  16.535  	} else {
  16.536 -		rc = -ENOMEM;
  16.537 +		atomic_inc(&my_priv->refcnt);
  16.538  	}
  16.539 +	return my_priv;
  16.540 +}
  16.541 +
  16.542 +/**************************************************************
  16.543 +
  16.544 + The interface to let the tpm plugin register its callback
  16.545 + function and send data to another partition using this module
  16.546 +
  16.547 +**************************************************************/
  16.548  
  16.549 -exit:
  16.550 -	spin_unlock_irqrestore(&dataex.req_list_lock, flags);
  16.551 -	return rc;
  16.552 +static DEFINE_MUTEX(suspend_lock);
  16.553 +/*
  16.554 + * Send data via this module by calling this function
  16.555 + */
  16.556 +int vtpm_vd_send(struct tpm_chip *chip,
  16.557 +                 struct tpm_private *tp,
  16.558 +                 const u8 * buf, size_t count, void *ptr)
  16.559 +{
  16.560 +	int sent;
  16.561 +
  16.562 +	mutex_lock(&suspend_lock);
  16.563 +	sent = tpm_xmit(tp, buf, count, 0, ptr);
  16.564 +	mutex_unlock(&suspend_lock);
  16.565 +
  16.566 +	return sent;
  16.567  }
  16.568  
  16.569 -static void tpm_xen_cancel(struct tpm_chip *chip)
  16.570 +/**************************************************************
  16.571 + XENBUS support code
  16.572 +**************************************************************/
  16.573 +
  16.574 +static int setup_tpmring(struct xenbus_device *dev,
  16.575 +                         struct tpm_private *tp)
  16.576  {
  16.577 -	unsigned long flags;
  16.578 -	spin_lock_irqsave(&dataex.resp_list_lock,flags);
  16.579 +	tpmif_tx_interface_t *sring;
  16.580 +	int err;
  16.581 +
  16.582 +	tp->ring_ref = GRANT_INVALID_REF;
  16.583 +
  16.584 +	sring = (void *)__get_free_page(GFP_KERNEL);
  16.585 +	if (!sring) {
  16.586 +		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
  16.587 +		return -ENOMEM;
  16.588 +	}
  16.589 +	tp->tx = sring;
  16.590 +
  16.591 +	err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
  16.592 +	if (err < 0) {
  16.593 +		free_page((unsigned long)sring);
  16.594 +		tp->tx = NULL;
  16.595 +		xenbus_dev_fatal(dev, err, "allocating grant reference");
  16.596 +		goto fail;
  16.597 +	}
  16.598 +	tp->ring_ref = err;
  16.599  
  16.600 -	dataex.req_cancelled = dataex.current_request;
  16.601 +	err = tpmif_connect(dev, tp, dev->otherend_id);
  16.602 +	if (err)
  16.603 +		goto fail;
  16.604 +
  16.605 +	return 0;
  16.606 +fail:
  16.607 +	destroy_tpmring(tp);
  16.608 +	return err;
  16.609 +}
  16.610 +
  16.611  
  16.612 -	spin_unlock_irqrestore(&dataex.resp_list_lock,flags);
  16.613 +static void destroy_tpmring(struct tpm_private *tp)
  16.614 +{
  16.615 +	tpmif_set_connected_state(tp, 0);
  16.616 +
  16.617 +	if (tp->ring_ref != GRANT_INVALID_REF) {
  16.618 +		gnttab_end_foreign_access(tp->ring_ref, 0,
  16.619 +					  (unsigned long)tp->tx);
  16.620 +		tp->ring_ref = GRANT_INVALID_REF;
  16.621 +		tp->tx = NULL;
  16.622 +	}
  16.623 +
  16.624 +	if (tp->irq)
  16.625 +		unbind_from_irqhandler(tp->irq, tp);
  16.626 +
  16.627 +	tp->evtchn = tp->irq = 0;
  16.628  }
  16.629  
  16.630 -static u8 tpm_xen_status(struct tpm_chip *chip)
  16.631 +
  16.632 +static int talk_to_backend(struct xenbus_device *dev,
  16.633 +                           struct tpm_private *tp)
  16.634 +{
  16.635 +	const char *message = NULL;
  16.636 +	int err;
  16.637 +	xenbus_transaction_t xbt;
  16.638 +
  16.639 +	err = setup_tpmring(dev, tp);
  16.640 +	if (err) {
  16.641 +		xenbus_dev_fatal(dev, err, "setting up ring");
  16.642 +		goto out;
  16.643 +	}
  16.644 +
  16.645 +again:
  16.646 +	err = xenbus_transaction_start(&xbt);
  16.647 +	if (err) {
  16.648 +		xenbus_dev_fatal(dev, err, "starting transaction");
  16.649 +		goto destroy_tpmring;
  16.650 +	}
  16.651 +
  16.652 +	err = xenbus_printf(xbt, dev->nodename,
  16.653 +	                    "ring-ref","%u", tp->ring_ref);
  16.654 +	if (err) {
  16.655 +		message = "writing ring-ref";
  16.656 +		goto abort_transaction;
  16.657 +	}
  16.658 +
  16.659 +	err = xenbus_printf(xbt, dev->nodename,
  16.660 +			    "event-channel", "%u", tp->evtchn);
  16.661 +	if (err) {
  16.662 +		message = "writing event-channel";
  16.663 +		goto abort_transaction;
  16.664 +	}
  16.665 +
  16.666 +	err = xenbus_transaction_end(xbt, 0);
  16.667 +	if (err == -EAGAIN)
  16.668 +		goto again;
  16.669 +	if (err) {
  16.670 +		xenbus_dev_fatal(dev, err, "completing transaction");
  16.671 +		goto destroy_tpmring;
  16.672 +	}
  16.673 +
  16.674 +	xenbus_switch_state(dev, XenbusStateConnected);
  16.675 +
  16.676 +	return 0;
  16.677 +
  16.678 +abort_transaction:
  16.679 +	xenbus_transaction_end(xbt, 1);
  16.680 +	if (message)
  16.681 +		xenbus_dev_error(dev, err, "%s", message);
  16.682 +destroy_tpmring:
  16.683 +	destroy_tpmring(tp);
  16.684 +out:
  16.685 +	return err;
  16.686 +}
  16.687 +
  16.688 +/**
  16.689 + * Callback received when the backend's state changes.
  16.690 + */
  16.691 +static void backend_changed(struct xenbus_device *dev,
  16.692 +			    XenbusState backend_state)
  16.693  {
  16.694 -	unsigned long flags;
  16.695 -	u8 rc = 0;
  16.696 -	spin_lock_irqsave(&dataex.resp_list_lock, flags);
  16.697 -	/*
  16.698 -	 * Data are available if:
  16.699 -	 *  - there's a current response
  16.700 -	 *  - the last packet was queued only (this is fake, but necessary to
  16.701 -	 *      get the generic TPM layer to call the receive function.)
  16.702 -	 */
  16.703 -	if (NULL != dataex.current_response ||
  16.704 -	    0 != (dataex.flags & DATAEX_FLAG_QUEUED_ONLY)) {
  16.705 -		rc = STATUS_DATA_AVAIL;
  16.706 +	struct tpm_private *tp = dev->data;
  16.707 +	DPRINTK("\n");
  16.708 +
  16.709 +	switch (backend_state) {
  16.710 +	case XenbusStateInitialising:
  16.711 +	case XenbusStateInitWait:
  16.712 +	case XenbusStateInitialised:
  16.713 +	case XenbusStateUnknown:
  16.714 +		break;
  16.715 +
  16.716 +	case XenbusStateConnected:
  16.717 +		tpmif_set_connected_state(tp, 1);
  16.718 +		break;
  16.719 +
  16.720 +	case XenbusStateClosing:
  16.721 +		tpmif_set_connected_state(tp, 0);
  16.722 +		break;
  16.723 +
  16.724 +	case XenbusStateClosed:
  16.725 +		if (tp->is_suspended == 0) {
  16.726 +			device_unregister(&dev->dev);
  16.727 +		}
  16.728 +		xenbus_switch_state(dev, XenbusStateClosed);
  16.729 +		break;
  16.730 +	}
  16.731 +}
  16.732 +
  16.733 +
  16.734 +static int tpmfront_probe(struct xenbus_device *dev,
  16.735 +                          const struct xenbus_device_id *id)
  16.736 +{
  16.737 +	int err;
  16.738 +	int handle;
  16.739 +	struct tpm_private *tp = tpm_private_get();
  16.740 +
  16.741 +	if (!tp)
  16.742 +		return -ENOMEM;
  16.743 +
  16.744 +	err = xenbus_scanf(XBT_NULL, dev->nodename,
  16.745 +	                   "handle", "%i", &handle);
  16.746 +	if (XENBUS_EXIST_ERR(err))
  16.747 +		return err;
  16.748 +
  16.749 +	if (err < 0) {
  16.750 +		xenbus_dev_fatal(dev,err,"reading virtual-device");
  16.751 +		return err;
  16.752  	}
  16.753 -	spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
  16.754 -	return rc;
  16.755 +
  16.756 +	tp->dev = dev;
  16.757 +	dev->data = tp;
  16.758 +
  16.759 +	err = talk_to_backend(dev, tp);
  16.760 +	if (err) {
  16.761 +		tpm_private_put();
  16.762 +		dev->data = NULL;
  16.763 +		return err;
  16.764 +	}
  16.765 +	return 0;
  16.766 +}
  16.767 +
  16.768 +
  16.769 +static int tpmfront_remove(struct xenbus_device *dev)
  16.770 +{
  16.771 +	struct tpm_private *tp = (struct tpm_private *)dev->data;
  16.772 +	destroy_tpmring(tp);
  16.773 +	return 0;
  16.774 +}
  16.775 +
  16.776 +static int tpmfront_suspend(struct xenbus_device *dev)
  16.777 +{
  16.778 +	struct tpm_private *tp = (struct tpm_private *)dev->data;
  16.779 +	u32 ctr;
  16.780 +
  16.781 +	/* lock, so no app can send */
  16.782 +	mutex_lock(&suspend_lock);
  16.783 +	tp->is_suspended = 1;
  16.784 +
  16.785 +	for (ctr = 0; atomic_read(&tp->tx_busy) && ctr <= 25; ctr++) {
  16.786 +		if ((ctr % 10) == 0)
  16.787 +			printk("TPM-FE [INFO]: Waiting for outstanding request.\n");
  16.788 +		/*
  16.789 +		 * Wait for a request to be responded to.
  16.790 +		 */
  16.791 +		interruptible_sleep_on_timeout(&tp->wait_q, 100);
  16.792 +	}
  16.793 +	xenbus_switch_state(dev, XenbusStateClosed);
  16.794 +
  16.795 +	if (atomic_read(&tp->tx_busy)) {
  16.796 +		/*
  16.797 +		 * A temporary work-around.
  16.798 +		 */
  16.799 +		printk("TPM-FE [WARNING]: Resetting busy flag.");
  16.800 +		atomic_set(&tp->tx_busy, 0);
  16.801 +	}
  16.802 +
  16.803 +	return 0;
  16.804 +}
  16.805 +
  16.806 +static int tpmfront_resume(struct xenbus_device *dev)
  16.807 +{
  16.808 +	struct tpm_private *tp = (struct tpm_private *)dev->data;
  16.809 +	destroy_tpmring(tp);
  16.810 +	return talk_to_backend(dev, tp);
  16.811  }
  16.812  
  16.813 -static struct file_operations tpm_xen_ops = {
  16.814 -	.owner = THIS_MODULE,
  16.815 -	.llseek = no_llseek,
  16.816 -	.open = tpm_open,
  16.817 -	.read = tpm_read,
  16.818 -	.write = tpm_write,
  16.819 -	.release = tpm_release,
  16.820 +static int tpmif_connect(struct xenbus_device *dev,
  16.821 +                         struct tpm_private *tp,
  16.822 +                         domid_t domid)
  16.823 +{
  16.824 +	int err;
  16.825 +
  16.826 +	tp->backend_id = domid;
  16.827 +
  16.828 +	err = xenbus_alloc_evtchn(dev, &tp->evtchn);
  16.829 +	if (err)
  16.830 +		return err;
  16.831 +
  16.832 +	err = bind_evtchn_to_irqhandler(tp->evtchn,
  16.833 +					tpmif_int, SA_SAMPLE_RANDOM, "tpmif",
  16.834 +					tp);
  16.835 +	if (err <= 0) {
  16.836 +		WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
  16.837 +		return err;
  16.838 +	}
  16.839 +
  16.840 +	tp->irq = err;
  16.841 +	return 0;
  16.842 +}
  16.843 +
  16.844 +static struct xenbus_device_id tpmfront_ids[] = {
  16.845 +	{ "vtpm" },
  16.846 +	{ "" }
  16.847  };
  16.848  
  16.849 -static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
  16.850 -static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
  16.851 -static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
  16.852 -static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
  16.853 -
  16.854 -static struct attribute* xen_attrs[] = {
  16.855 -	&dev_attr_pubek.attr,
  16.856 -	&dev_attr_pcrs.attr,
  16.857 -	&dev_attr_caps.attr,
  16.858 -	&dev_attr_cancel.attr,
  16.859 -	NULL,
  16.860 +static struct xenbus_driver tpmfront = {
  16.861 +	.name = "vtpm",
  16.862 +	.owner = THIS_MODULE,
  16.863 +	.ids = tpmfront_ids,
  16.864 +	.probe = tpmfront_probe,
  16.865 +	.remove =  tpmfront_remove,
  16.866 +	.resume = tpmfront_resume,
  16.867 +	.otherend_changed = backend_changed,
  16.868 +	.suspend = tpmfront_suspend,
  16.869  };
  16.870  
  16.871 -static struct attribute_group xen_attr_grp = { .attrs = xen_attrs };
  16.872 +static void __init init_tpm_xenbus(void)
  16.873 +{
  16.874 +	xenbus_register_frontend(&tpmfront);
  16.875 +}
  16.876 +
  16.877 +static void __exit exit_tpm_xenbus(void)
  16.878 +{
  16.879 +	xenbus_unregister_driver(&tpmfront);
  16.880 +}
  16.881 +
  16.882 +static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
  16.883 +{
  16.884 +	unsigned int i;
  16.885 +
  16.886 +	for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
  16.887 +		tp->tx_buffers[i] = tx_buffer_alloc();
  16.888 +		if (!tp->tx_buffers[i]) {
  16.889 +			tpmif_free_tx_buffers(tp);
  16.890 +			return -ENOMEM;
  16.891 +		}
  16.892 +	}
  16.893 +	return 0;
  16.894 +}
  16.895 +
  16.896 +static void tpmif_free_tx_buffers(struct tpm_private *tp)
  16.897 +{
  16.898 +	unsigned int i;
  16.899 +
  16.900 +	for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
  16.901 +		tx_buffer_free(tp->tx_buffers[i]);
  16.902 +	}
  16.903 +}
  16.904 +
  16.905 +static void tpmif_rx_action(unsigned long priv)
  16.906 +{
  16.907 +	struct tpm_private *tp = (struct tpm_private *)priv;
  16.908 +
  16.909 +	int i = 0;
  16.910 +	unsigned int received;
  16.911 +	unsigned int offset = 0;
  16.912 +	u8 *buffer;
  16.913 +	tpmif_tx_request_t *tx;
  16.914 +	tx = &tp->tx->ring[i].req;
  16.915 +
  16.916 +	atomic_set(&tp->tx_busy, 0);
  16.917 +	wake_up_interruptible(&tp->wait_q);
  16.918 +
  16.919 +	received = tx->size;
  16.920 +
  16.921 +	buffer = kmalloc(received, GFP_ATOMIC);
  16.922 +	if (NULL == buffer) {
  16.923 +		goto exit;
  16.924 +	}
  16.925 +
  16.926 +	for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
  16.927 +		struct tx_buffer *txb = tp->tx_buffers[i];
  16.928 +		tpmif_tx_request_t *tx;
  16.929 +		unsigned int tocopy;
  16.930 +
  16.931 +		tx = &tp->tx->ring[i].req;
  16.932 +		tocopy = tx->size;
  16.933 +		if (tocopy > PAGE_SIZE) {
  16.934 +			tocopy = PAGE_SIZE;
  16.935 +		}
  16.936 +
  16.937 +		memcpy(&buffer[offset], txb->data, tocopy);
  16.938 +
  16.939 +		gnttab_release_grant_reference(&gref_head, tx->ref);
  16.940 +
  16.941 +		offset += tocopy;
  16.942 +	}
  16.943 +
  16.944 +	vtpm_vd_recv(buffer, received, tp->tx_remember);
  16.945 +	kfree(buffer);
  16.946 +
  16.947 +exit:
  16.948 +
  16.949 +	return;
  16.950 +}
  16.951 +
  16.952 +
  16.953 +static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
  16.954 +{
  16.955 +	struct tpm_private *tp = tpm_priv;
  16.956 +	unsigned long flags;
  16.957 +
  16.958 +	spin_lock_irqsave(&tp->tx_lock, flags);
  16.959 +	tpmif_rx_tasklet.data = (unsigned long)tp;
  16.960 +	tasklet_schedule(&tpmif_rx_tasklet);
  16.961 +	spin_unlock_irqrestore(&tp->tx_lock, flags);
  16.962 +
  16.963 +	return IRQ_HANDLED;
  16.964 +}
  16.965 +
  16.966 +
  16.967 +static int tpm_xmit(struct tpm_private *tp,
  16.968 +                    const u8 * buf, size_t count, int isuserbuffer,
  16.969 +                    void *remember)
  16.970 +{
  16.971 +	tpmif_tx_request_t *tx;
  16.972 +	TPMIF_RING_IDX i;
  16.973 +	unsigned int offset = 0;
  16.974 +
  16.975 +	spin_lock_irq(&tp->tx_lock);
  16.976 +
  16.977 +	if (unlikely(atomic_read(&tp->tx_busy))) {
  16.978 +		printk("tpm_xmit: There's an outstanding request/response "
  16.979 +		       "on the way!\n");
  16.980 +		spin_unlock_irq(&tp->tx_lock);
  16.981 +		return -EBUSY;
  16.982 +	}
  16.983  
  16.984 -static struct tpm_vendor_specific tpm_xen = {
  16.985 -	.recv = tpm_xen_recv,
  16.986 -	.send = tpm_xen_send,
  16.987 -	.cancel = tpm_xen_cancel,
  16.988 -	.status = tpm_xen_status,
  16.989 -	.req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
  16.990 -	.req_complete_val  = STATUS_DATA_AVAIL,
  16.991 -	.req_canceled = STATUS_READY,
  16.992 -	.base = 0,
  16.993 -	.attr_group = &xen_attr_grp,
  16.994 -	.miscdev.fops = &tpm_xen_ops,
  16.995 -	.buffersize = 64 * 1024,
  16.996 +	if (tp->is_connected != 1) {
  16.997 +		spin_unlock_irq(&tp->tx_lock);
  16.998 +		return -EIO;
  16.999 +	}
 16.1000 +
 16.1001 +	for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
 16.1002 +		struct tx_buffer *txb = tp->tx_buffers[i];
 16.1003 +		int copied;
 16.1004 +
 16.1005 +		if (NULL == txb) {
 16.1006 +			DPRINTK("txb (i=%d) is NULL. buffers initilized?\n"
 16.1007 +				"Not transmitting anything!\n", i);
 16.1008 +			spin_unlock_irq(&tp->tx_lock);
 16.1009 +			return -EFAULT;
 16.1010 +		}
 16.1011 +		copied = tx_buffer_copy(txb, &buf[offset], count,
 16.1012 +		                        isuserbuffer);
 16.1013 +		if (copied < 0) {
 16.1014 +			/* An error occurred */
 16.1015 +			spin_unlock_irq(&tp->tx_lock);
 16.1016 +			return copied;
 16.1017 +		}
 16.1018 +		count -= copied;
 16.1019 +		offset += copied;
 16.1020 +
 16.1021 +		tx = &tp->tx->ring[i].req;
 16.1022 +
 16.1023 +		tx->addr = virt_to_machine(txb->data);
 16.1024 +		tx->size = txb->len;
 16.1025 +
 16.1026 +		DPRINTK("First 4 characters sent by TPM-FE are 0x%02x 0x%02x 0x%02x 0x%02x\n",
 16.1027 +		        txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
 16.1028 +
 16.1029 +		/* get the granttable reference for this page */
 16.1030 +		tx->ref = gnttab_claim_grant_reference(&gref_head);
 16.1031 +
 16.1032 +		if (-ENOSPC == tx->ref) {
 16.1033 +			spin_unlock_irq(&tp->tx_lock);
 16.1034 +			DPRINTK(" Grant table claim reference failed in func:%s line:%d file:%s\n", __FUNCTION__, __LINE__, __FILE__);
 16.1035 +			return -ENOSPC;
 16.1036 +		}
 16.1037 +		gnttab_grant_foreign_access_ref( tx->ref,
 16.1038 +		                                 tp->backend_id,
 16.1039 +		                                 (tx->addr >> PAGE_SHIFT),
 16.1040 +		                                 0 /*RW*/);
 16.1041 +		wmb();
 16.1042 +	}
 16.1043 +
 16.1044 +	atomic_set(&tp->tx_busy, 1);
 16.1045 +	tp->tx_remember = remember;
 16.1046 +	mb();
 16.1047 +
 16.1048 +	DPRINTK("Notifying backend via event channel %d\n",
 16.1049 +	        tp->evtchn);
 16.1050 +
 16.1051 +	notify_remote_via_irq(tp->irq);
 16.1052 +
 16.1053 +	spin_unlock_irq(&tp->tx_lock);
 16.1054 +	return offset;
 16.1055 +}
 16.1056 +
 16.1057 +
 16.1058 +static void tpmif_notify_upperlayer(struct tpm_private *tp)
 16.1059 +{
 16.1060 +	/*
 16.1061 +	 * Notify upper layer about the state of the connection
 16.1062 +	 * to the BE.
 16.1063 +	 */
 16.1064 +	if (tp->is_connected) {
 16.1065 +		vtpm_vd_status(TPM_VD_STATUS_CONNECTED);
 16.1066 +	} else {
 16.1067 +		vtpm_vd_status(TPM_VD_STATUS_DISCONNECTED);
 16.1068 +	}
 16.1069 +}
 16.1070 +
 16.1071 +
 16.1072 +static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
 16.1073 +{
 16.1074 +	/*
 16.1075 +	 * Don't notify upper layer if we are in suspend mode and
 16.1076 +	 * should disconnect - assumption is that we will resume
 16.1077 +	 * The mutex keeps apps from sending.
 16.1078 +	 */
 16.1079 +	if (is_connected == 0 && tp->is_suspended == 1) {
 16.1080 +		return;
 16.1081 +	}
 16.1082 +
 16.1083 +	/*
 16.1084 +	 * Unlock the mutex if we are connected again
 16.1085 +	 * after being suspended - now resuming.
 16.1086 +	 * This also removes the suspend state.
 16.1087 +	 */
 16.1088 +	if (is_connected == 1 && tp->is_suspended == 1) {
 16.1089 +		tp->is_suspended = 0;
 16.1090 +		/* unlock, so apps can resume sending */
 16.1091 +		mutex_unlock(&suspend_lock);
 16.1092 +	}
 16.1093 +
 16.1094 +	if (is_connected != tp->is_connected) {
 16.1095 +		tp->is_connected = is_connected;
 16.1096 +		tpmif_notify_upperlayer(tp);
 16.1097 +	}
 16.1098 +}
 16.1099 +
 16.1100 +
 16.1101 +
 16.1102 +/* =================================================================
 16.1103 + * Initialization function.
 16.1104 + * =================================================================
 16.1105 + */
 16.1106 +
 16.1107 +struct tpm_virtual_device tvd = {
 16.1108 +	.max_tx_size = PAGE_SIZE * TPMIF_TX_RING_SIZE,
 16.1109  };
 16.1110  
 16.1111 -static struct platform_device *pdev;
 16.1112 -
 16.1113 -static struct tpmfe_device tpmfe = {
 16.1114 -	.receive = tpm_recv,
 16.1115 -	.status  = tpm_fe_status,
 16.1116 -};
 16.1117 -
 16.1118 -
 16.1119 -static int __init init_xen(void)
 16.1120 +static int __init tpmif_init(void)
 16.1121  {
 16.1122  	int rc;
 16.1123 +	struct tpm_private *tp;
 16.1124  
 16.1125  	if ((xen_start_info->flags & SIF_INITDOMAIN)) {
 16.1126  		return -EPERM;
 16.1127  	}
 16.1128 -	/*
 16.1129 -	 * Register device with the low lever front-end
 16.1130 -	 * driver
 16.1131 -	 */
 16.1132 -	if ((rc = tpm_fe_register_receiver(&tpmfe)) < 0) {
 16.1133 -		goto err_exit;
 16.1134 -	}
 16.1135  
 16.1136 -	/*
 16.1137 -	 * Register our device with the system.
 16.1138 -	 */
 16.1139 -	pdev = platform_device_register_simple("tpm_vtpm", -1, NULL, 0);
 16.1140 -	if (IS_ERR(pdev)) {
 16.1141 -		rc = PTR_ERR(pdev);
 16.1142 -		goto err_unreg_fe;
 16.1143 +	tp = tpm_private_get();
 16.1144 +	if (!tp) {
 16.1145 +		rc = -ENOMEM;
 16.1146 +		goto failexit;
 16.1147  	}
 16.1148  
 16.1149 -	tpm_xen.buffersize = tpmfe.max_tx_size;
 16.1150 +	tvd.tpm_private = tp;
 16.1151 +	rc = init_vtpm(&tvd);
 16.1152 +	if (rc)
 16.1153 +		goto init_vtpm_failed;
 16.1154  
 16.1155 -	if ((rc = tpm_register_hardware(&pdev->dev, &tpm_xen)) < 0) {
 16.1156 -		goto err_unreg_pdev;
 16.1157 +	IPRINTK("Initialising the vTPM driver.\n");
 16.1158 +	if ( gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE,
 16.1159 +	                                     &gref_head ) < 0) {
 16.1160 +		rc = -EFAULT;
 16.1161 +		goto gnttab_alloc_failed;
 16.1162  	}
 16.1163  
 16.1164 -	dataex.current_request = NULL;
 16.1165 -	spin_lock_init(&dataex.req_list_lock);
 16.1166 -	init_waitqueue_head(&dataex.req_wait_queue);
 16.1167 -	INIT_LIST_HEAD(&dataex.queued_requests);
 16.1168 -
 16.1169 -	dataex.current_response = NULL;
 16.1170 -	spin_lock_init(&dataex.resp_list_lock);
 16.1171 -	init_waitqueue_head(&dataex.resp_wait_queue);
 16.1172 -
 16.1173 -	disconnect_time = jiffies;
 16.1174 -
 16.1175 +	init_tpm_xenbus();
 16.1176  	return 0;
 16.1177  
 16.1178 +gnttab_alloc_failed:
 16.1179 +	cleanup_vtpm();
 16.1180 +init_vtpm_failed:
 16.1181 +	tpm_private_put();
 16.1182 +failexit:
 16.1183  
 16.1184 -err_unreg_pdev:
 16.1185 -	platform_device_unregister(pdev);
 16.1186 -err_unreg_fe:
 16.1187 -	tpm_fe_unregister_receiver();
 16.1188 -
 16.1189 -err_exit:
 16.1190  	return rc;
 16.1191  }
 16.1192  
 16.1193 -static void __exit cleanup_xen(void)
 16.1194 +
 16.1195 +static void __exit tpmif_exit(void)
 16.1196  {
 16.1197 -	struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
 16.1198 -	if (chip) {
 16.1199 -		tpm_remove_hardware(chip->dev);
 16.1200 -		platform_device_unregister(pdev);
 16.1201 -		tpm_fe_unregister_receiver();
 16.1202 -	}
 16.1203 +	cleanup_vtpm();
 16.1204 +	tpm_private_put();
 16.1205 +	exit_tpm_xenbus();
 16.1206 +	gnttab_free_grant_references(gref_head);
 16.1207  }
 16.1208  
 16.1209 -module_init(init_xen);
 16.1210 -module_exit(cleanup_xen);
 16.1211 +module_init(tpmif_init);
 16.1212 +module_exit(tpmif_exit);
 16.1213 +
 16.1214 +MODULE_LICENSE("Dual BSD/GPL");
 16.1215  
 16.1216 -MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
 16.1217 -MODULE_DESCRIPTION("TPM Driver for XEN (shared memory)");
 16.1218 -MODULE_VERSION("1.0");
 16.1219 -MODULE_LICENSE("GPL");
 16.1220 +/*
 16.1221 + * Local variables:
 16.1222 + *  c-file-style: "linux"
 16.1223 + *  indent-tabs-mode: t
 16.1224 + *  c-indent-level: 8
 16.1225 + *  c-basic-offset: 8
 16.1226 + *  tab-width: 8
 16.1227 + * End:
 16.1228 + */
    17.1 --- a/linux-2.6-xen-sparse/drivers/xen/Kconfig	Tue May 02 18:17:59 2006 +0100
    17.2 +++ b/linux-2.6-xen-sparse/drivers/xen/Kconfig	Thu May 04 14:19:19 2006 +0100
    17.3 @@ -173,14 +173,6 @@ config XEN_BLKDEV_TAP
    17.4  	  to a character device, allowing device prototyping in application
    17.5  	  space.  Odds are that you want to say N here.
    17.6  
    17.7 -config XEN_TPMDEV_FRONTEND
    17.8 -	tristate "TPM-device frontend driver"
    17.9 -	default n
   17.10 -	select TCG_TPM
   17.11 -	select TCG_XEN
   17.12 -	help
   17.13 -	  The TPM-device frontend driver.
   17.14 -
   17.15  config XEN_SCRUB_PAGES
   17.16  	bool "Scrub memory before freeing it to Xen"
   17.17  	default y
    18.1 --- a/linux-2.6-xen-sparse/drivers/xen/Makefile	Tue May 02 18:17:59 2006 +0100
    18.2 +++ b/linux-2.6-xen-sparse/drivers/xen/Makefile	Thu May 04 14:19:19 2006 +0100
    18.3 @@ -16,7 +16,6 @@ obj-$(CONFIG_XEN_TPMDEV_BACKEND)	+= tpmb
    18.4  obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= blkfront/
    18.5  obj-$(CONFIG_XEN_NETDEV_FRONTEND)	+= netfront/
    18.6  obj-$(CONFIG_XEN_BLKDEV_TAP)    	+= blktap/
    18.7 -obj-$(CONFIG_XEN_TPMDEV_FRONTEND)	+= tpmfront/
    18.8  obj-$(CONFIG_XEN_PCIDEV_BACKEND)	+= pciback/
    18.9  obj-$(CONFIG_XEN_PCIDEV_FRONTEND)	+= pcifront/
   18.10  
    19.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h	Tue May 02 18:17:59 2006 +0100
    19.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h	Thu May 04 14:19:19 2006 +0100
    19.3 @@ -50,6 +50,8 @@ typedef struct tpmif_st {
    19.4  	grant_handle_t shmem_handle;
    19.5  	grant_ref_t shmem_ref;
    19.6  	struct page *pagerange;
    19.7 +
    19.8 +	char devname[20];
    19.9  } tpmif_t;
   19.10  
   19.11  void tpmif_disconnect_complete(tpmif_t * tpmif);
    20.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c	Tue May 02 18:17:59 2006 +0100
    20.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c	Thu May 04 14:19:19 2006 +0100
    20.3 @@ -32,6 +32,7 @@ static tpmif_t *alloc_tpmif(domid_t domi
    20.4  	tpmif->domid = domid;
    20.5  	tpmif->status = DISCONNECTED;
    20.6  	tpmif->tpm_instance = instance;
    20.7 +	snprintf(tpmif->devname, sizeof(tpmif->devname), "tpmif%d", domid);
    20.8  	atomic_set(&tpmif->refcnt, 1);
    20.9  
   20.10  	tpmif->pagerange = balloon_alloc_empty_page_range(TPMIF_TX_RING_SIZE);
   20.11 @@ -144,7 +145,7 @@ int tpmif_map(tpmif_t *tpmif, unsigned l
   20.12  	tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr;
   20.13  
   20.14  	tpmif->irq = bind_evtchn_to_irqhandler(
   20.15 -		tpmif->evtchn, tpmif_be_int, 0, "tpmif-backend", tpmif);
   20.16 +		tpmif->evtchn, tpmif_be_int, 0, tpmif->devname, tpmif);
   20.17  	tpmif->shmem_ref = shared_page;
   20.18  	tpmif->active = 1;
   20.19  
    21.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Tue May 02 18:17:59 2006 +0100
    21.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Thu May 04 14:19:19 2006 +0100
    21.3 @@ -28,7 +28,8 @@ struct data_exchange {
    21.4  	struct list_head pending_pak;
    21.5  	struct list_head current_pak;
    21.6  	unsigned int copied_so_far;
    21.7 -	u8 has_opener;
    21.8 +	u8 has_opener:1;
    21.9 +	u8 aborted:1;
   21.10  	rwlock_t pak_lock;	// protects all of the previous fields
   21.11  	wait_queue_head_t wait_queue;
   21.12  };
   21.13 @@ -101,6 +102,16 @@ static inline int copy_to_buffer(void *t
   21.14  	return 0;
   21.15  }
   21.16  
   21.17 +
   21.18 +static void dataex_init(struct data_exchange *dataex)
   21.19 +{
   21.20 +	INIT_LIST_HEAD(&dataex->pending_pak);
   21.21 +	INIT_LIST_HEAD(&dataex->current_pak);
   21.22 +	dataex->has_opener = 0;
   21.23 +	rwlock_init(&dataex->pak_lock);
   21.24 +	init_waitqueue_head(&dataex->wait_queue);
   21.25 +}
   21.26 +
   21.27  /***************************************************************
   21.28   Packet-related functions
   21.29  ***************************************************************/
   21.30 @@ -148,11 +159,12 @@ static struct packet *packet_alloc(tpmif
   21.31  				   u32 size, u8 req_tag, u8 flags)
   21.32  {
   21.33  	struct packet *pak = NULL;
   21.34 -	pak = kzalloc(sizeof (struct packet), GFP_KERNEL);
   21.35 +	pak = kzalloc(sizeof (struct packet), GFP_ATOMIC);
   21.36  	if (NULL != pak) {
   21.37  		if (tpmif) {
   21.38  			pak->tpmif = tpmif;
   21.39  			pak->tpm_instance = tpmif->tpm_instance;
   21.40 +			tpmif_get(tpmif);
   21.41  		}
   21.42  		pak->data_len = size;
   21.43  		pak->req_tag = req_tag;
   21.44 @@ -180,6 +192,9 @@ static void packet_free(struct packet *p
   21.45  	if (timer_pending(&pak->processing_timer)) {
   21.46  		BUG();
   21.47  	}
   21.48 +
   21.49 +	if (pak->tpmif)
   21.50 +		tpmif_put(pak->tpmif);
   21.51  	kfree(pak->data_buffer);
   21.52  	/*
   21.53  	 * cannot do tpmif_put(pak->tpmif); bad things happen
   21.54 @@ -271,7 +286,6 @@ int _packet_write(struct packet *pak,
   21.55  		struct gnttab_map_grant_ref map_op;
   21.56  		struct gnttab_unmap_grant_ref unmap_op;
   21.57  		tpmif_tx_request_t *tx;
   21.58 -		unsigned long pfn, mfn, mfn_orig;
   21.59  
   21.60  		tx = &tpmif->tx->ring[i].req;
   21.61  
   21.62 @@ -295,12 +309,6 @@ int _packet_write(struct packet *pak,
   21.63  			return 0;
   21.64  		}
   21.65  
   21.66 -		pfn = __pa(MMAP_VADDR(tpmif, i)) >> PAGE_SHIFT;
   21.67 -		mfn = FOREIGN_FRAME(map_op.dev_bus_addr >> PAGE_SHIFT);
   21.68 -		mfn_orig = pfn_to_mfn(pfn);
   21.69 -
   21.70 -		set_phys_to_machine(pfn, mfn);
   21.71 -
   21.72  		tocopy = MIN(size - offset, PAGE_SIZE);
   21.73  
   21.74  		if (copy_from_buffer((void *)(MMAP_VADDR(tpmif, i) |
   21.75 @@ -311,8 +319,6 @@ int _packet_write(struct packet *pak,
   21.76  		}
   21.77  		tx->size = tocopy;
   21.78  
   21.79 -		set_phys_to_machine(pfn, mfn_orig);
   21.80 -
   21.81  		gnttab_set_unmap_op(&unmap_op, MMAP_VADDR(tpmif, i),
   21.82  				    GNTMAP_host_map, handle);
   21.83  
   21.84 @@ -514,27 +520,41 @@ static ssize_t vtpm_op_read(struct file 
   21.85  	unsigned long flags;
   21.86  
   21.87  	write_lock_irqsave(&dataex.pak_lock, flags);
   21.88 +	if (dataex.aborted) {
   21.89 +		dataex.aborted = 0;
   21.90 +		dataex.copied_so_far = 0;
   21.91 +		write_unlock_irqrestore(&dataex.pak_lock, flags);
   21.92 +		return -EIO;
   21.93 +	}
   21.94  
   21.95  	if (list_empty(&dataex.pending_pak)) {
   21.96  		write_unlock_irqrestore(&dataex.pak_lock, flags);
   21.97  		wait_event_interruptible(dataex.wait_queue,
   21.98  					 !list_empty(&dataex.pending_pak));
   21.99  		write_lock_irqsave(&dataex.pak_lock, flags);
  21.100 +		dataex.copied_so_far = 0;
  21.101  	}
  21.102  
  21.103  	if (!list_empty(&dataex.pending_pak)) {
  21.104  		unsigned int left;
  21.105 +
  21.106  		pak = list_entry(dataex.pending_pak.next, struct packet, next);
  21.107 -
  21.108  		left = pak->data_len - dataex.copied_so_far;
  21.109 +		list_del(&pak->next);
  21.110 +		write_unlock_irqrestore(&dataex.pak_lock, flags);
  21.111  
  21.112  		DPRINTK("size given by app: %d, available: %d\n", size, left);
  21.113  
  21.114  		ret_size = MIN(size, left);
  21.115  
  21.116  		ret_size = packet_read(pak, ret_size, data, size, 1);
  21.117 +
  21.118 +		write_lock_irqsave(&dataex.pak_lock, flags);
  21.119 +
  21.120  		if (ret_size < 0) {
  21.121 -			ret_size = -EFAULT;
  21.122 +			del_singleshot_timer_sync(&pak->processing_timer);
  21.123 +			packet_free(pak);
  21.124 +			dataex.copied_so_far = 0;
  21.125  		} else {
  21.126  			DPRINTK("Copied %d bytes to user buffer\n", ret_size);
  21.127  
  21.128 @@ -545,7 +565,6 @@ static ssize_t vtpm_op_read(struct file 
  21.129  
  21.130  				del_singleshot_timer_sync(&pak->
  21.131  							  processing_timer);
  21.132 -				list_del(&pak->next);
  21.133  				list_add_tail(&pak->next, &dataex.current_pak);
  21.134  				/*
  21.135  				 * The more fontends that are handled at the same time,
  21.136 @@ -554,6 +573,8 @@ static ssize_t vtpm_op_read(struct file 
  21.137  				mod_timer(&pak->processing_timer,
  21.138  					  jiffies + (num_frontends * 60 * HZ));
  21.139  				dataex.copied_so_far = 0;
  21.140 +			} else {
  21.141 +				list_add(&pak->next, &dataex.pending_pak);
  21.142  			}
  21.143  		}
  21.144  	}
  21.145 @@ -601,8 +622,8 @@ static ssize_t vtpm_op_write(struct file
  21.146  
  21.147  	if (pak == NULL) {
  21.148  		write_unlock_irqrestore(&dataex.pak_lock, flags);
  21.149 -		printk(KERN_ALERT "No associated packet! (inst=%d)\n",
  21.150 -		       ntohl(vrh.instance_no));
  21.151 +		DPRINTK(KERN_ALERT "No associated packet! (inst=%d)\n",
  21.152 +		        ntohl(vrh.instance_no));
  21.153  		return -EFAULT;
  21.154  	}
  21.155  
  21.156 @@ -784,15 +805,17 @@ static int tpm_send_fail_message(struct 
  21.157  	return rc;
  21.158  }
  21.159  
  21.160 -static void _vtpm_release_packets(struct list_head *head,
  21.161 -				  tpmif_t * tpmif, int send_msgs)
  21.162 +static int _vtpm_release_packets(struct list_head *head,
  21.163 +				 tpmif_t * tpmif, int send_msgs)
  21.164  {
  21.165 +	int aborted = 0;
  21.166 +	int c = 0;
  21.167  	struct packet *pak;
  21.168 -	struct list_head *pos,
  21.169 -	         *tmp;
  21.170 +	struct list_head *pos, *tmp;
  21.171  
  21.172  	list_for_each_safe(pos, tmp, head) {
  21.173  		pak = list_entry(pos, struct packet, next);
  21.174 +		c += 1;
  21.175  
  21.176  		if (tpmif == NULL || pak->tpmif == tpmif) {
  21.177  			int can_send = 0;
  21.178 @@ -808,8 +831,11 @@ static void _vtpm_release_packets(struct
  21.179  				tpm_send_fail_message(pak, pak->req_tag);
  21.180  			}
  21.181  			packet_free(pak);
  21.182 +			if (c == 1)
  21.183 +				aborted = 1;
  21.184  		}
  21.185  	}
  21.186 +	return aborted;
  21.187  }
  21.188  
  21.189  int vtpm_release_packets(tpmif_t * tpmif, int send_msgs)
  21.190 @@ -818,7 +844,9 @@ int vtpm_release_packets(tpmif_t * tpmif
  21.191  
  21.192  	write_lock_irqsave(&dataex.pak_lock, flags);
  21.193  
  21.194 -	_vtpm_release_packets(&dataex.pending_pak, tpmif, send_msgs);
  21.195 +	dataex.aborted = _vtpm_release_packets(&dataex.pending_pak,
  21.196 +					       tpmif,
  21.197 +					       send_msgs);
  21.198  	_vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);
  21.199  
  21.200  	write_unlock_irqrestore(&dataex.pak_lock, flags);
  21.201 @@ -1020,11 +1048,7 @@ static int __init tpmback_init(void)
  21.202  		return rc;
  21.203  	}
  21.204  
  21.205 -	INIT_LIST_HEAD(&dataex.pending_pak);
  21.206 -	INIT_LIST_HEAD(&dataex.current_pak);
  21.207 -	dataex.has_opener = 0;
  21.208 -	rwlock_init(&dataex.pak_lock);
  21.209 -	init_waitqueue_head(&dataex.wait_queue);
  21.210 +	dataex_init(&dataex);
  21.211  
  21.212  	spin_lock_init(&tpm_schedule_list_lock);
  21.213  	INIT_LIST_HEAD(&tpm_schedule_list);
  21.214 @@ -1041,6 +1065,7 @@ module_init(tpmback_init);
  21.215  
  21.216  static void __exit tpmback_exit(void)
  21.217  {
  21.218 +	vtpm_release_packets(NULL, 0);
  21.219  	tpmif_xenbus_exit();
  21.220  	tpmif_interface_exit();
  21.221  	misc_deregister(&vtpms_miscdevice);
    22.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/Makefile	Tue May 02 18:17:59 2006 +0100
    22.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.3 @@ -1,2 +0,0 @@
    22.4 -
    22.5 -obj-$(CONFIG_XEN_TPMDEV_FRONTEND)	+= tpmfront.o
    23.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Tue May 02 18:17:59 2006 +0100
    23.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.3 @@ -1,767 +0,0 @@
    23.4 -/*
    23.5 - * Copyright (c) 2005, IBM Corporation
    23.6 - *
    23.7 - * Author: Stefan Berger, stefanb@us.ibm.com
    23.8 - * Grant table support: Mahadevan Gomathisankaran
    23.9 - *
   23.10 - * This code has been derived from drivers/xen/netfront/netfront.c
   23.11 - *
   23.12 - * Copyright (c) 2002-2004, K A Fraser
   23.13 - *
   23.14 - * This program is free software; you can redistribute it and/or
   23.15 - * modify it under the terms of the GNU General Public License version 2
   23.16 - * as published by the Free Software Foundation; or, when distributed
   23.17 - * separately from the Linux kernel or incorporated into other
   23.18 - * software packages, subject to the following license:
   23.19 - * 
   23.20 - * Permission is hereby granted, free of charge, to any person obtaining a copy
   23.21 - * of this source file (the "Software"), to deal in the Software without
   23.22 - * restriction, including without limitation the rights to use, copy, modify,
   23.23 - * merge, publish, distribute, sublicense, and/or sell copies of the Software,
   23.24 - * and to permit persons to whom the Software is furnished to do so, subject to
   23.25 - * the following conditions:
   23.26 - *
   23.27 - * The above copyright notice and this permission notice shall be included in
   23.28 - * all copies or substantial portions of the Software.
   23.29 - *
   23.30 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   23.31 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   23.32 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   23.33 - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   23.34 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   23.35 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
   23.36 - * IN THE SOFTWARE.
   23.37 - */
   23.38 -
   23.39 -#include <linux/config.h>
   23.40 -#include <linux/module.h>
   23.41 -#include <linux/version.h>
   23.42 -#include <linux/kernel.h>
   23.43 -#include <linux/slab.h>
   23.44 -#include <linux/errno.h>
   23.45 -#include <linux/interrupt.h>
   23.46 -#include <linux/init.h>
   23.47 -#include <xen/tpmfe.h>
   23.48 -#include <linux/err.h>
   23.49 -#include <linux/mutex.h>
   23.50 -#include <asm/io.h>
   23.51 -#include <xen/evtchn.h>
   23.52 -#include <xen/interface/grant_table.h>
   23.53 -#include <xen/interface/io/tpmif.h>
   23.54 -#include <asm/uaccess.h>
   23.55 -#include <xen/xenbus.h>
   23.56 -#include <xen/interface/grant_table.h>
   23.57 -
   23.58 -#include "tpmfront.h"
   23.59 -
   23.60 -#undef DEBUG
   23.61 -
   23.62 -/* locally visible variables */
   23.63 -static grant_ref_t gref_head;
   23.64 -static struct tpm_private *my_priv;
   23.65 -
   23.66 -/* local function prototypes */
   23.67 -static irqreturn_t tpmif_int(int irq,
   23.68 -                             void *tpm_priv,
   23.69 -                             struct pt_regs *ptregs);
   23.70 -static void tpmif_rx_action(unsigned long unused);
   23.71 -static int tpmif_connect(struct xenbus_device *dev,
   23.72 -                         struct tpm_private *tp,
   23.73 -                         domid_t domid);
   23.74 -static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
   23.75 -static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
   23.76 -static void tpmif_free_tx_buffers(struct tpm_private *tp);
   23.77 -static void tpmif_set_connected_state(struct tpm_private *tp,
   23.78 -                                      u8 newstate);
   23.79 -static int tpm_xmit(struct tpm_private *tp,
   23.80 -                    const u8 * buf, size_t count, int userbuffer,
   23.81 -                    void *remember);
   23.82 -static void destroy_tpmring(struct tpm_private *tp);
   23.83 -
   23.84 -#define DPRINTK(fmt, args...) \
   23.85 -    pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
   23.86 -#define IPRINTK(fmt, args...) \
   23.87 -    printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
   23.88 -#define WPRINTK(fmt, args...) \
   23.89 -    printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
   23.90 -
   23.91 -#define GRANT_INVALID_REF	0
   23.92 -
   23.93 -
   23.94 -static inline int
   23.95 -tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
   23.96 -               int isuserbuffer)
   23.97 -{
   23.98 -	int copied = len;
   23.99 -
  23.100 -	if (len > txb->size) {
  23.101 -		copied = txb->size;
  23.102 -	}
  23.103 -	if (isuserbuffer) {
  23.104 -		if (copy_from_user(txb->data, src, copied))
  23.105 -			return -EFAULT;
  23.106 -	} else {
  23.107 -		memcpy(txb->data, src, copied);
  23.108 -	}
  23.109 -	txb->len = len;
  23.110 -	return copied;
  23.111 -}
  23.112 -
  23.113 -static inline struct tx_buffer *tx_buffer_alloc(void)
  23.114 -{
  23.115 -	struct tx_buffer *txb = kzalloc(sizeof (struct tx_buffer),
  23.116 -					GFP_KERNEL);
  23.117 -
  23.118 -	if (txb) {
  23.119 -		txb->len = 0;
  23.120 -		txb->size = PAGE_SIZE;
  23.121 -		txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
  23.122 -		if (txb->data == NULL) {
  23.123 -			kfree(txb);
  23.124 -			txb = NULL;
  23.125 -		}
  23.126 -	}
  23.127 -	return txb;
  23.128 -}
  23.129 -
  23.130 -
  23.131 -static inline void tx_buffer_free(struct tx_buffer *txb)
  23.132 -{
  23.133 -	if (txb) {
  23.134 -		free_page((long)txb->data);
  23.135 -		kfree(txb);
  23.136 -	}
  23.137 -}
  23.138 -
  23.139 -/**************************************************************
  23.140 - Utility function for the tpm_private structure
  23.141 -**************************************************************/
  23.142 -static inline void tpm_private_init(struct tpm_private *tp)
  23.143 -{
  23.144 -	spin_lock_init(&tp->tx_lock);
  23.145 -	init_waitqueue_head(&tp->wait_q);
  23.146 -}
  23.147 -
  23.148 -static inline void tpm_private_free(void)
  23.149 -{
  23.150 -	tpmif_free_tx_buffers(my_priv);
  23.151 -	kfree(my_priv);
  23.152 -	my_priv = NULL;
  23.153 -}
  23.154 -
  23.155 -static struct tpm_private *tpm_private_get(void)
  23.156 -{
  23.157 -	int err;
  23.158 -	if (!my_priv) {
  23.159 -		my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
  23.160 -		if (my_priv) {
  23.161 -			tpm_private_init(my_priv);
  23.162 -			err = tpmif_allocate_tx_buffers(my_priv);
  23.163 -			if (err < 0) {
  23.164 -				tpm_private_free();
  23.165 -			}
  23.166 -		}
  23.167 -	}
  23.168 -	return my_priv;
  23.169 -}
  23.170 -
  23.171 -/**************************************************************
  23.172 -
  23.173 - The interface to let the tpm plugin register its callback
  23.174 - function and send data to another partition using this module
  23.175 -
  23.176 -**************************************************************/
  23.177 -
  23.178 -static DEFINE_MUTEX(upperlayer_lock);
  23.179 -static DEFINE_MUTEX(suspend_lock);
  23.180 -static struct tpmfe_device *upperlayer_tpmfe;
  23.181 -
  23.182 -/*
  23.183 - * Send data via this module by calling this function
  23.184 - */
  23.185 -int tpm_fe_send(struct tpm_private *tp, const u8 * buf, size_t count, void *ptr)
  23.186 -{
  23.187 -	int sent;
  23.188 -
  23.189 -	mutex_lock(&suspend_lock);
  23.190 -	sent = tpm_xmit(tp, buf, count, 0, ptr);
  23.191 -	mutex_unlock(&suspend_lock);
  23.192 -
  23.193 -	return sent;
  23.194 -}
  23.195 -EXPORT_SYMBOL(tpm_fe_send);
  23.196 -
  23.197 -/*
  23.198 - * Register a callback for receiving data from this module
  23.199 - */
  23.200 -int tpm_fe_register_receiver(struct tpmfe_device *tpmfe_dev)
  23.201 -{
  23.202 -	int rc = 0;
  23.203 -
  23.204 -	mutex_lock(&upperlayer_lock);
  23.205 -	if (NULL == upperlayer_tpmfe) {
  23.206 -		upperlayer_tpmfe = tpmfe_dev;
  23.207 -		tpmfe_dev->max_tx_size = TPMIF_TX_RING_SIZE * PAGE_SIZE;
  23.208 -		tpmfe_dev->tpm_private = tpm_private_get();
  23.209 -		if (!tpmfe_dev->tpm_private) {
  23.210 -			rc = -ENOMEM;
  23.211 -		}
  23.212 -	} else {
  23.213 -		rc = -EBUSY;
  23.214 -	}
  23.215 -	mutex_unlock(&upperlayer_lock);
  23.216 -	return rc;
  23.217 -}
  23.218 -EXPORT_SYMBOL(tpm_fe_register_receiver);
  23.219 -
  23.220 -/*
  23.221 - * Unregister the callback for receiving data from this module
  23.222 - */
  23.223 -void tpm_fe_unregister_receiver(void)
  23.224 -{
  23.225 -	mutex_lock(&upperlayer_lock);
  23.226 -	upperlayer_tpmfe = NULL;
  23.227 -	mutex_unlock(&upperlayer_lock);
  23.228 -}
  23.229 -EXPORT_SYMBOL(tpm_fe_unregister_receiver);
  23.230 -
  23.231 -/*
  23.232 - * Call this function to send data to the upper layer's
  23.233 - * registered receiver function.
  23.234 - */
  23.235 -static int tpm_fe_send_upperlayer(const u8 * buf, size_t count,
  23.236 -                                  const void *ptr)
  23.237 -{
  23.238 -	int rc = 0;
  23.239 -
  23.240 -	mutex_lock(&upperlayer_lock);
  23.241 -
  23.242 -	if (upperlayer_tpmfe && upperlayer_tpmfe->receive)
  23.243 -		rc = upperlayer_tpmfe->receive(buf, count, ptr);
  23.244 -
  23.245 -	mutex_unlock(&upperlayer_lock);
  23.246 -	return rc;
  23.247 -}
  23.248 -
  23.249 -/**************************************************************
  23.250 - XENBUS support code
  23.251 -**************************************************************/
  23.252 -
  23.253 -static int setup_tpmring(struct xenbus_device *dev,
  23.254 -                         struct tpm_private *tp)
  23.255 -{
  23.256 -	tpmif_tx_interface_t *sring;
  23.257 -	int err;
  23.258 -
  23.259 -	tp->ring_ref = GRANT_INVALID_REF;
  23.260 -
  23.261 -	sring = (void *)__get_free_page(GFP_KERNEL);
  23.262 -	if (!sring) {
  23.263 -		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
  23.264 -		return -ENOMEM;
  23.265 -	}
  23.266 -	tp->tx = sring;
  23.267 -
  23.268 -	err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
  23.269 -	if (err < 0) {
  23.270 -		free_page((unsigned long)sring);
  23.271 -		tp->tx = NULL;
  23.272 -		xenbus_dev_fatal(dev, err, "allocating grant reference");
  23.273 -		goto fail;
  23.274 -	}
  23.275 -	tp->ring_ref = err;
  23.276 -
  23.277 -	err = tpmif_connect(dev, tp, dev->otherend_id);
  23.278 -	if (err)
  23.279 -		goto fail;
  23.280 -
  23.281 -	return 0;
  23.282 -fail:
  23.283 -	destroy_tpmring(tp);
  23.284 -	return err;
  23.285 -}
  23.286 -
  23.287 -
  23.288 -static void destroy_tpmring(struct tpm_private *tp)
  23.289 -{
  23.290 -	tpmif_set_connected_state(tp, 0);
  23.291 -
  23.292 -	if (tp->ring_ref != GRANT_INVALID_REF) {
  23.293 -		gnttab_end_foreign_access(tp->ring_ref, 0,
  23.294 -					  (unsigned long)tp->tx);
  23.295 -		tp->ring_ref = GRANT_INVALID_REF;
  23.296 -		tp->tx = NULL;
  23.297 -	}
  23.298 -
  23.299 -	if (tp->irq)
  23.300 -		unbind_from_irqhandler(tp->irq, tp);
  23.301 -
  23.302 -	tp->evtchn = tp->irq = 0;
  23.303 -}
  23.304 -
  23.305 -
  23.306 -static int talk_to_backend(struct xenbus_device *dev,
  23.307 -                           struct tpm_private *tp)
  23.308 -{
  23.309 -	const char *message = NULL;
  23.310 -	int err;
  23.311 -	xenbus_transaction_t xbt;
  23.312 -
  23.313 -	err = setup_tpmring(dev, tp);
  23.314 -	if (err) {
  23.315 -		xenbus_dev_fatal(dev, err, "setting up ring");
  23.316 -		goto out;
  23.317 -	}
  23.318 -
  23.319 -again:
  23.320 -	err = xenbus_transaction_start(&xbt);
  23.321 -	if (err) {
  23.322 -		xenbus_dev_fatal(dev, err, "starting transaction");
  23.323 -		goto destroy_tpmring;
  23.324 -	}
  23.325 -
  23.326 -	err = xenbus_printf(xbt, dev->nodename,
  23.327 -	                    "ring-ref","%u", tp->ring_ref);
  23.328 -	if (err) {
  23.329 -		message = "writing ring-ref";
  23.330 -		goto abort_transaction;
  23.331 -	}
  23.332 -
  23.333 -	err = xenbus_printf(xbt, dev->nodename,
  23.334 -			    "event-channel", "%u", tp->evtchn);
  23.335 -	if (err) {
  23.336 -		message = "writing event-channel";
  23.337 -		goto abort_transaction;
  23.338 -	}
  23.339 -
  23.340 -	err = xenbus_transaction_end(xbt, 0);
  23.341 -	if (err == -EAGAIN)
  23.342 -		goto again;
  23.343 -	if (err) {
  23.344 -		xenbus_dev_fatal(dev, err, "completing transaction");
  23.345 -		goto destroy_tpmring;
  23.346 -	}
  23.347 -
  23.348 -	xenbus_switch_state(dev, XenbusStateConnected);
  23.349 -
  23.350 -	return 0;
  23.351 -
  23.352 -abort_transaction:
  23.353 -	xenbus_transaction_end(xbt, 1);
  23.354 -	if (message)
  23.355 -		xenbus_dev_error(dev, err, "%s", message);
  23.356 -destroy_tpmring:
  23.357 -	destroy_tpmring(tp);
  23.358 -out:
  23.359 -	return err;
  23.360 -}
  23.361 -
  23.362 -/**
  23.363 - * Callback received when the backend's state changes.
  23.364 - */
  23.365 -static void backend_changed(struct xenbus_device *dev,
  23.366 -			    XenbusState backend_state)
  23.367 -{
  23.368 -	struct tpm_private *tp = dev->data;
  23.369 -	DPRINTK("\n");
  23.370 -
  23.371 -	switch (backend_state) {
  23.372 -	case XenbusStateInitialising:
  23.373 -	case XenbusStateInitWait:
  23.374 -	case XenbusStateInitialised:
  23.375 -	case XenbusStateUnknown:
  23.376 -		break;
  23.377 -
  23.378 -	case XenbusStateConnected:
  23.379 -		tpmif_set_connected_state(tp, 1);
  23.380 -		break;
  23.381 -
  23.382 -	case XenbusStateClosing:
  23.383 -		tpmif_set_connected_state(tp, 0);
  23.384 -		break;
  23.385 -
  23.386 -	case XenbusStateClosed:
  23.387 -		if (tp->is_suspended == 0) {
  23.388 -			device_unregister(&dev->dev);
  23.389 -		}
  23.390 -		xenbus_switch_state(dev, XenbusStateClosed);
  23.391 -		break;
  23.392 -	}
  23.393 -}
  23.394 -
  23.395 -
  23.396 -static int tpmfront_probe(struct xenbus_device *dev,
  23.397 -                          const struct xenbus_device_id *id)
  23.398 -{
  23.399 -	int err;
  23.400 -	int handle;
  23.401 -	struct tpm_private *tp = tpm_private_get();
  23.402 -
  23.403 -	if (!tp)
  23.404 -		return -ENOMEM;
  23.405 -
  23.406 -	err = xenbus_scanf(XBT_NULL, dev->nodename,
  23.407 -	                   "handle", "%i", &handle);
  23.408 -	if (XENBUS_EXIST_ERR(err))
  23.409 -		return err;
  23.410 -
  23.411 -	if (err < 0) {
  23.412 -		xenbus_dev_fatal(dev,err,"reading virtual-device");
  23.413 -		return err;
  23.414 -	}
  23.415 -
  23.416 -	tp->dev = dev;
  23.417 -	dev->data = tp;
  23.418 -
  23.419 -	err = talk_to_backend(dev, tp);
  23.420 -	if (err) {
  23.421 -		tpm_private_free();
  23.422 -		dev->data = NULL;
  23.423 -		return err;
  23.424 -	}
  23.425 -	return 0;
  23.426 -}
  23.427 -
  23.428 -
  23.429 -static int tpmfront_remove(struct xenbus_device *dev)
  23.430 -{
  23.431 -	struct tpm_private *tp = (struct tpm_private *)dev->data;
  23.432 -	destroy_tpmring(tp);
  23.433 -	return 0;
  23.434 -}
  23.435 -
  23.436 -static int tpmfront_suspend(struct xenbus_device *dev)
  23.437 -{
  23.438 -	struct tpm_private *tp = (struct tpm_private *)dev->data;
  23.439 -	u32 ctr;
  23.440 -
  23.441 -	/* lock, so no app can send */
  23.442 -	mutex_lock(&suspend_lock);
  23.443 -	xenbus_switch_state(dev, XenbusStateClosed);
  23.444 -	tp->is_suspended = 1;
  23.445 -
  23.446 -	for (ctr = 0; atomic_read(&tp->tx_busy) && ctr <= 25; ctr++) {
  23.447 -		if ((ctr % 10) == 0)
  23.448 -			printk("TPM-FE [INFO]: Waiting for outstanding request.\n");
  23.449 -		/*
  23.450 -		 * Wait for a request to be responded to.
  23.451 -		 */
  23.452 -		interruptible_sleep_on_timeout(&tp->wait_q, 100);
  23.453 -	}
  23.454 -
  23.455 -	if (atomic_read(&tp->tx_busy)) {
  23.456 -		/*
  23.457 -		 * A temporary work-around.
  23.458 -		 */
  23.459 -		printk("TPM-FE [WARNING]: Resetting busy flag.");
  23.460 -		atomic_set(&tp->tx_busy, 0);
  23.461 -	}
  23.462 -
  23.463 -	return 0;
  23.464 -}
  23.465 -
  23.466 -static int tpmfront_resume(struct xenbus_device *dev)
  23.467 -{
  23.468 -	struct tpm_private *tp = (struct tpm_private *)dev->data;
  23.469 -	destroy_tpmring(tp);
  23.470 -	return talk_to_backend(dev, tp);
  23.471 -}
  23.472 -
  23.473 -static int tpmif_connect(struct xenbus_device *dev,
  23.474 -                         struct tpm_private *tp,
  23.475 -                         domid_t domid)
  23.476 -{
  23.477 -	int err;
  23.478 -
  23.479 -	tp->backend_id = domid;
  23.480 -
  23.481 -	err = xenbus_alloc_evtchn(dev, &tp->evtchn);
  23.482 -	if (err)
  23.483 -		return err;
  23.484 -
  23.485 -	err = bind_evtchn_to_irqhandler(tp->evtchn,
  23.486 -					tpmif_int, SA_SAMPLE_RANDOM, "tpmif",
  23.487 -					tp);
  23.488 -	if (err <= 0) {
  23.489 -		WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
  23.490 -		return err;
  23.491 -	}
  23.492 -
  23.493 -	tp->irq = err;
  23.494 -	return 0;
  23.495 -}
  23.496 -
  23.497 -static struct xenbus_device_id tpmfront_ids[] = {
  23.498 -	{ "vtpm" },
  23.499 -	{ "" }
  23.500 -};
  23.501 -
  23.502 -static struct xenbus_driver tpmfront = {
  23.503 -	.name = "vtpm",
  23.504 -	.owner = THIS_MODULE,
  23.505 -	.ids = tpmfront_ids,
  23.506 -	.probe = tpmfront_probe,
  23.507 -	.remove =  tpmfront_remove,
  23.508 -	.resume = tpmfront_resume,
  23.509 -	.otherend_changed = backend_changed,
  23.510 -	.suspend = tpmfront_suspend,
  23.511 -};
  23.512 -
  23.513 -static void __init init_tpm_xenbus(void)
  23.514 -{
  23.515 -	xenbus_register_frontend(&tpmfront);
  23.516 -}
  23.517 -
  23.518 -static void __exit exit_tpm_xenbus(void)
  23.519 -{
  23.520 -	xenbus_unregister_driver(&tpmfront);
  23.521 -}
  23.522 -
  23.523 -static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
  23.524 -{
  23.525 -	unsigned int i;
  23.526 -
  23.527 -	for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
  23.528 -		tp->tx_buffers[i] = tx_buffer_alloc();
  23.529 -		if (!tp->tx_buffers[i]) {
  23.530 -			tpmif_free_tx_buffers(tp);
  23.531 -			return -ENOMEM;
  23.532 -		}
  23.533 -	}
  23.534 -	return 0;
  23.535 -}
  23.536 -
  23.537 -static void tpmif_free_tx_buffers(struct tpm_private *tp)
  23.538 -{
  23.539 -	unsigned int i;
  23.540 -
  23.541 -	for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
  23.542 -		tx_buffer_free(tp->tx_buffers[i]);
  23.543 -	}
  23.544 -}
  23.545 -
  23.546 -static void tpmif_rx_action(unsigned long priv)
  23.547 -{
  23.548 -	struct tpm_private *tp = (struct tpm_private *)priv;
  23.549 -
  23.550 -	int i = 0;
  23.551 -	unsigned int received;
  23.552 -	unsigned int offset = 0;
  23.553 -	u8 *buffer;
  23.554 -	tpmif_tx_request_t *tx;
  23.555 -	tx = &tp->tx->ring[i].req;
  23.556 -
  23.557 -	received = tx->size;
  23.558 -
  23.559 -	buffer = kmalloc(received, GFP_KERNEL);
  23.560 -	if (NULL == buffer) {
  23.561 -		goto exit;
  23.562 -	}
  23.563 -
  23.564 -	for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
  23.565 -		struct tx_buffer *txb = tp->tx_buffers[i];
  23.566 -		tpmif_tx_request_t *tx;
  23.567 -		unsigned int tocopy;
  23.568 -
  23.569 -		tx = &tp->tx->ring[i].req;
  23.570 -		tocopy = tx->size;
  23.571 -		if (tocopy > PAGE_SIZE) {
  23.572 -			tocopy = PAGE_SIZE;
  23.573 -		}
  23.574 -
  23.575 -		memcpy(&buffer[offset], txb->data, tocopy);
  23.576 -
  23.577 -		gnttab_release_grant_reference(&gref_head, tx->ref);
  23.578 -
  23.579 -		offset += tocopy;
  23.580 -	}
  23.581 -
  23.582 -	tpm_fe_send_upperlayer(buffer, received, tp->tx_remember);
  23.583 -	kfree(buffer);
  23.584 -
  23.585 -exit:
  23.586 -	atomic_set(&tp->tx_busy, 0);
  23.587 -	wake_up_interruptible(&tp->wait_q);
  23.588 -}
  23.589 -
  23.590 -
  23.591 -static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
  23.592 -{
  23.593 -	struct tpm_private *tp = tpm_priv;
  23.594 -	unsigned long flags;
  23.595 -
  23.596 -	spin_lock_irqsave(&tp->tx_lock, flags);
  23.597 -	tpmif_rx_tasklet.data = (unsigned long)tp;
  23.598 -	tasklet_schedule(&tpmif_rx_tasklet);
  23.599 -	spin_unlock_irqrestore(&tp->tx_lock, flags);
  23.600 -
  23.601 -	return IRQ_HANDLED;
  23.602 -}
  23.603 -
  23.604 -
  23.605 -static int tpm_xmit(struct tpm_private *tp,
  23.606 -                    const u8 * buf, size_t count, int isuserbuffer,
  23.607 -                    void *remember)
  23.608 -{
  23.609 -	tpmif_tx_request_t *tx;
  23.610 -	TPMIF_RING_IDX i;
  23.611 -	unsigned int offset = 0;
  23.612 -
  23.613 -	spin_lock_irq(&tp->tx_lock);
  23.614 -
  23.615 -	if (unlikely(atomic_read(&tp->tx_busy))) {
  23.616 -		printk("tpm_xmit: There's an outstanding request/response "
  23.617 -		       "on the way!\n");
  23.618 -		spin_unlock_irq(&tp->tx_lock);
  23.619 -		return -EBUSY;
  23.620 -	}
  23.621 -
  23.622 -	if (tp->is_connected != 1) {
  23.623 -		spin_unlock_irq(&tp->tx_lock);
  23.624 -		return -EIO;
  23.625 -	}
  23.626 -
  23.627 -	for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
  23.628 -		struct tx_buffer *txb = tp->tx_buffers[i];
  23.629 -		int copied;
  23.630 -
  23.631 -		if (NULL == txb) {
  23.632 -			DPRINTK("txb (i=%d) is NULL. buffers initilized?\n"
  23.633 -				"Not transmitting anything!\n", i);
  23.634 -			spin_unlock_irq(&tp->tx_lock);
  23.635 -			return -EFAULT;
  23.636 -		}
  23.637 -		copied = tx_buffer_copy(txb, &buf[offset], count,
  23.638 -		                        isuserbuffer);
  23.639 -		if (copied < 0) {
  23.640 -			/* An error occurred */
  23.641 -			spin_unlock_irq(&tp->tx_lock);
  23.642 -			return copied;
  23.643 -		}
  23.644 -		count -= copied;
  23.645 -		offset += copied;
  23.646 -
  23.647 -		tx = &tp->tx->ring[i].req;
  23.648 -
  23.649 -		tx->addr = virt_to_machine(txb->data);
  23.650 -		tx->size = txb->len;
  23.651 -
  23.652 -		DPRINTK("First 4 characters sent by TPM-FE are 0x%02x 0x%02x 0x%02x 0x%02x\n",
  23.653 -		        txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
  23.654 -
  23.655 -		/* get the granttable reference for this page */
  23.656 -		tx->ref = gnttab_claim_grant_reference(&gref_head);
  23.657 -
  23.658 -		if (-ENOSPC == tx->ref) {
  23.659 -			spin_unlock_irq(&tp->tx_lock);
  23.660 -			DPRINTK(" Grant table claim reference failed in func:%s line:%d file:%s\n", __FUNCTION__, __LINE__, __FILE__);
  23.661 -			return -ENOSPC;
  23.662 -		}
  23.663 -		gnttab_grant_foreign_access_ref( tx->ref,
  23.664 -		                                 tp->backend_id,
  23.665 -		                                 (tx->addr >> PAGE_SHIFT),
  23.666 -		                                 0 /*RW*/);
  23.667 -		wmb();
  23.668 -	}
  23.669 -
  23.670 -	atomic_set(&tp->tx_busy, 1);
  23.671 -	tp->tx_remember = remember;
  23.672 -	mb();
  23.673 -
  23.674 -	DPRINTK("Notifying backend via event channel %d\n",
  23.675 -	        tp->evtchn);
  23.676 -
  23.677 -	notify_remote_via_irq(tp->irq);
  23.678 -
  23.679 -	spin_unlock_irq(&tp->tx_lock);
  23.680 -	return offset;
  23.681 -}
  23.682 -
  23.683 -
  23.684 -static void tpmif_notify_upperlayer(struct tpm_private *tp)
  23.685 -{
  23.686 -	/*
  23.687 -	 * Notify upper layer about the state of the connection
  23.688 -	 * to the BE.
  23.689 -	 */
  23.690 -	mutex_lock(&upperlayer_lock);
  23.691 -
  23.692 -	if (upperlayer_tpmfe != NULL) {
  23.693 -		if (tp->is_connected) {
  23.694 -			upperlayer_tpmfe->status(TPMFE_STATUS_CONNECTED);
  23.695 -		} else {
  23.696 -			upperlayer_tpmfe->status(0);
  23.697 -		}
  23.698 -	}
  23.699 -	mutex_unlock(&upperlayer_lock);
  23.700 -}
  23.701 -
  23.702 -
  23.703 -static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
  23.704 -{
  23.705 -	/*
  23.706 -	 * Don't notify upper layer if we are in suspend mode and
  23.707 -	 * should disconnect - assumption is that we will resume
  23.708 -	 * The mutex keeps apps from sending.
  23.709 -	 */
  23.710 -	if (is_connected == 0 && tp->is_suspended == 1) {
  23.711 -		return;
  23.712 -	}
  23.713 -
  23.714 -	/*
  23.715 -	 * Unlock the mutex if we are connected again
  23.716 -	 * after being suspended - now resuming.
  23.717 -	 * This also removes the suspend state.
  23.718 -	 */
  23.719 -	if (is_connected == 1 && tp->is_suspended == 1) {
  23.720 -		tp->is_suspended = 0;
  23.721 -		/* unlock, so apps can resume sending */
  23.722 -		mutex_unlock(&suspend_lock);
  23.723 -	}
  23.724 -
  23.725 -	if (is_connected != tp->is_connected) {
  23.726 -		tp->is_connected = is_connected;
  23.727 -		tpmif_notify_upperlayer(tp);
  23.728 -	}
  23.729 -}
  23.730 -
  23.731 -
  23.732 -/* =================================================================
  23.733 - * Initialization function.
  23.734 - * =================================================================
  23.735 - */
  23.736 -
  23.737 -static int __init tpmif_init(void)
  23.738 -{
  23.739 -	IPRINTK("Initialising the vTPM driver.\n");
  23.740 -	if ( gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE,
  23.741 -	                                     &gref_head ) < 0) {
  23.742 -		return -EFAULT;
  23.743 -	}
  23.744 -
  23.745 -	init_tpm_xenbus();
  23.746 -
  23.747 -	return 0;
  23.748 -}
  23.749 -
  23.750 -module_init(tpmif_init);
  23.751 -
  23.752 -static void __exit tpmif_exit(void)
  23.753 -{
  23.754 -	exit_tpm_xenbus();
  23.755 -	gnttab_free_grant_references(gref_head);
  23.756 -}
  23.757 -
  23.758 -module_exit(tpmif_exit);
  23.759 -
  23.760 -MODULE_LICENSE("Dual BSD/GPL");
  23.761 -
  23.762 -/*
  23.763 - * Local variables:
  23.764 - *  c-file-style: "linux"
  23.765 - *  indent-tabs-mode: t
  23.766 - *  c-indent-level: 8
  23.767 - *  c-basic-offset: 8
  23.768 - *  tab-width: 8
  23.769 - * End:
  23.770 - */
    24.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h	Tue May 02 18:17:59 2006 +0100
    24.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.3 @@ -1,40 +0,0 @@
    24.4 -#ifndef TPM_FRONT_H
    24.5 -#define TPM_FRONT_H
    24.6 -
    24.7 -struct tpm_private {
    24.8 -	tpmif_tx_interface_t *tx;
    24.9 -	unsigned int evtchn;
   24.10 -	unsigned int irq;
   24.11 -	u8 is_connected;
   24.12 -	u8 is_suspended;
   24.13 -
   24.14 -	spinlock_t tx_lock;
   24.15 -
   24.16 -	struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
   24.17 -
   24.18 -	atomic_t tx_busy;
   24.19 -	void *tx_remember;
   24.20 -	domid_t backend_id;
   24.21 -	wait_queue_head_t wait_q;
   24.22 -
   24.23 -	struct xenbus_device *dev;
   24.24 -	int ring_ref;
   24.25 -};
   24.26 -
   24.27 -struct tx_buffer {
   24.28 -	unsigned int size;	// available space in data
   24.29 -	unsigned int len;	// used space in data
   24.30 -	unsigned char *data;	// pointer to a page
   24.31 -};
   24.32 -
   24.33 -#endif
   24.34 -
   24.35 -/*
   24.36 - * Local variables:
   24.37 - *  c-file-style: "linux"
   24.38 - *  indent-tabs-mode: t
   24.39 - *  c-indent-level: 8
   24.40 - *  c-basic-offset: 8
   24.41 - *  tab-width: 8
   24.42 - * End:
   24.43 - */
    25.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c	Tue May 02 18:17:59 2006 +0100
    25.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c	Thu May 04 14:19:19 2006 +0100
    25.3 @@ -183,7 +183,7 @@ void *xenbus_dev_request_and_reply(struc
    25.4  
    25.5  	mutex_unlock(&xs_state.request_mutex);
    25.6  
    25.7 -	if ((msg->type == XS_TRANSACTION_END) ||
    25.8 +	if ((req_msg.type == XS_TRANSACTION_END) ||
    25.9  	    ((req_msg.type == XS_TRANSACTION_START) &&
   25.10  	     (msg->type == XS_ERROR)))
   25.11  		up_read(&xs_state.suspend_mutex);
    26.1 --- a/linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h	Tue May 02 18:17:59 2006 +0100
    26.2 +++ b/linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h	Thu May 04 14:19:19 2006 +0100
    26.3 @@ -134,7 +134,6 @@ HYPERVISOR_poll(
    26.4  #define	pte_mfn(_x)	pte_pfn(_x)
    26.5  #define __pte_ma(_x)	((pte_t) {(_x)})
    26.6  #define phys_to_machine_mapping_valid(_x)	(1)
    26.7 -#define	kmap_flush_unused()	do {} while (0)
    26.8  #define pfn_pte_ma(_x,_y)	__pte_ma(0)
    26.9  #ifndef CONFIG_XEN_IA64_DOM0_VP //XXX
   26.10  #define set_phys_to_machine(_x,_y)	do {} while (0)
    27.1 --- a/linux-2.6-xen-sparse/include/xen/tpmfe.h	Tue May 02 18:17:59 2006 +0100
    27.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.3 @@ -1,40 +0,0 @@
    27.4 -#ifndef TPM_FE_H
    27.5 -#define TPM_FE_H
    27.6 -
    27.7 -struct tpm_private;
    27.8 -
    27.9 -struct tpmfe_device {
   27.10 -	/*
   27.11 -	 * Let upper layer receive data from front-end
   27.12 -	 */
   27.13 -	int (*receive)(const u8 *buffer, size_t count, const void *ptr);
   27.14 -	/*
   27.15 -	 * Indicate the status of the front-end to the upper
   27.16 -	 * layer.
   27.17 -	 */
   27.18 -	void (*status)(unsigned int flags);
   27.19 -
   27.20 -	/*
   27.21 -	 * This field indicates the maximum size the driver can
   27.22 -	 * transfer in one chunk. It is filled out by the front-end
   27.23 -	 * driver and should be propagated to the generic tpm driver
   27.24 -	 * for allocation of buffers.
   27.25 -	 */
   27.26 -	unsigned int max_tx_size;
   27.27 -	/*
   27.28 -	 * The following is a private structure of the underlying
   27.29 -	 * driver. It's expected as first parameter in the send function.
   27.30 -	 */
   27.31 -	struct tpm_private *tpm_private;
   27.32 -};
   27.33 -
   27.34 -enum {
   27.35 -	TPMFE_STATUS_DISCONNECTED = 0x0,
   27.36 -	TPMFE_STATUS_CONNECTED = 0x1
   27.37 -};
   27.38 -
   27.39 -int tpm_fe_send(struct tpm_private * tp, const u8 * buf, size_t count, void *ptr);
   27.40 -int tpm_fe_register_receiver(struct tpmfe_device *);
   27.41 -void tpm_fe_unregister_receiver(void);
   27.42 -
   27.43 -#endif
    28.1 --- a/linux-2.6-xen-sparse/mm/Kconfig	Tue May 02 18:17:59 2006 +0100
    28.2 +++ b/linux-2.6-xen-sparse/mm/Kconfig	Thu May 04 14:19:19 2006 +0100
    28.3 @@ -126,14 +126,14 @@ comment "Memory hotplug is currently inc
    28.4  # Default to 4 for wider testing, though 8 might be more appropriate.
    28.5  # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
    28.6  # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
    28.7 -# XEN uses the mapping field on pagetable pages to store a pointer to
    28.8 -# the destructor.
    28.9 +# XEN on x86 architecture uses the mapping field on pagetable pages to store a
   28.10 +# pointer to the destructor. This conflicts with pte_lock_deinit().
   28.11  #
   28.12  config SPLIT_PTLOCK_CPUS
   28.13  	int
   28.14  	default "4096" if ARM && !CPU_CACHE_VIPT
   28.15  	default "4096" if PARISC && !PA20
   28.16 -	default "4096" if XEN
   28.17 +	default "4096" if X86_XEN || X86_64_XEN
   28.18  	default "4"
   28.19  
   28.20  #
    29.1 --- a/tools/python/xen/xend/XendBootloader.py	Tue May 02 18:17:59 2006 +0100
    29.2 +++ b/tools/python/xen/xend/XendBootloader.py	Thu May 04 14:19:19 2006 +0100
    29.3 @@ -19,13 +19,13 @@ import sxp
    29.4  from XendLogging import log
    29.5  from XendError import VmError
    29.6  
    29.7 -def bootloader(blexec, disk, quiet = 0, entry = None):
    29.8 +def bootloader(blexec, disk, quiet = 0, blargs = None):
    29.9      """Run the boot loader executable on the given disk and return a
   29.10      config image.
   29.11      @param blexec  Binary to use as the boot loader
   29.12      @param disk Disk to run the boot loader on.
   29.13      @param quiet Run in non-interactive mode, just booting the default.
   29.14 -    @param entry Default entry to boot."""
   29.15 +    @param blargs Arguments to pass to the bootloader."""
   29.16      
   29.17      if not os.access(blexec, os.X_OK):
   29.18          msg = "Bootloader isn't executable"
   29.19 @@ -48,8 +48,8 @@ def bootloader(blexec, disk, quiet = 0, 
   29.20          if quiet:
   29.21              args.append("-q")
   29.22          args.append("--output=%s" %(fifo,))
   29.23 -        if entry is not None:
   29.24 -            args.append("--entry=%s" %(entry,))
   29.25 +        if blargs is not None:
   29.26 +            args.extend(blargs.split())
   29.27          args.append(disk)
   29.28  
   29.29          try:
    30.1 --- a/tools/python/xen/xend/XendDomainInfo.py	Tue May 02 18:17:59 2006 +0100
    30.2 +++ b/tools/python/xen/xend/XendDomainInfo.py	Thu May 04 14:19:19 2006 +0100
    30.3 @@ -132,6 +132,7 @@ ROUNDTRIPPING_CONFIG_ENTRIES = [
    30.4      ('memory',     int),
    30.5      ('maxmem',     int),
    30.6      ('bootloader', str),
    30.7 +    ('bootloader_args', str),
    30.8      ('features', str),
    30.9      ]
   30.10  
   30.11 @@ -571,6 +572,7 @@ class XendDomainInfo:
   30.12              defaultInfo('memory',       lambda: 0)
   30.13              defaultInfo('maxmem',       lambda: 0)
   30.14              defaultInfo('bootloader',   lambda: None)
   30.15 +            defaultInfo('bootloader_args', lambda: None)            
   30.16              defaultInfo('backend',      lambda: [])
   30.17              defaultInfo('device',       lambda: [])
   30.18              defaultInfo('image',        lambda: None)
   30.19 @@ -1630,7 +1632,8 @@ class XendDomainInfo:
   30.20              if disk is None:
   30.21                  continue
   30.22              fn = blkdev_uname_to_file(disk)
   30.23 -            blcfg = bootloader(self.info['bootloader'], fn, 1)
   30.24 +            blcfg = bootloader(self.info['bootloader'], fn, 1,
   30.25 +                               self.info['bootloader_args'])
   30.26              break
   30.27          if blcfg is None:
   30.28              msg = "Had a bootloader specified, but can't find disk"
    31.1 --- a/tools/python/xen/xm/create.py	Tue May 02 18:17:59 2006 +0100
    31.2 +++ b/tools/python/xen/xm/create.py	Thu May 04 14:19:19 2006 +0100
    31.3 @@ -122,9 +122,13 @@ gopts.var('bootloader', val='FILE',
    31.4            fn=set_value, default=None,
    31.5            use="Path to bootloader.")
    31.6  
    31.7 +gopts.var('bootargs', val='NAME',
    31.8 +          fn=set_value, default=None,
    31.9 +          use="Arguments to pass to boot loader")
   31.10 +
   31.11  gopts.var('bootentry', val='NAME',
   31.12            fn=set_value, default=None,
   31.13 -          use="Entry to boot via boot loader")
   31.14 +          use="DEPRECATED.  Entry to boot via boot loader.  Use bootargs.")
   31.15  
   31.16  gopts.var('kernel', val='FILE',
   31.17            fn=set_value, default=None,
   31.18 @@ -620,8 +624,13 @@ def run_bootloader(vals):
   31.19      (uname, dev, mode, backend) = vals.disk[0]
   31.20      file = blkif.blkdev_uname_to_file(uname)
   31.21  
   31.22 +    if vals.bootentry:
   31.23 +        warn("The bootentry option is deprecated.  Use bootargs and pass "
   31.24 +             "--entry= directly.")
   31.25 +        vals.bootargs = "--entry=%s" %(vals.bootentry,)
   31.26 +
   31.27      return bootloader(vals.bootloader, file, not vals.console_autoconnect,
   31.28 -                      vals.bootentry)
   31.29 +                      vals.bootargs)
   31.30  
   31.31  def make_config(vals):
   31.32      """Create the domain configuration.
   31.33 @@ -654,8 +663,10 @@ def make_config(vals):
   31.34          config.append(['backend', ['tpmif']])
   31.35  
   31.36      if vals.bootloader:
   31.37 +        config_image = run_bootloader(vals)
   31.38          config.append(['bootloader', vals.bootloader])
   31.39 -        config_image = run_bootloader(vals)
   31.40 +        if vals.bootargs:
   31.41 +            config.append(['bootloader_args', vals.bootargs])
   31.42      else:
   31.43          config_image = configure_image(vals)
   31.44      config.append(['image', config_image])
    32.1 --- a/xen/arch/x86/hvm/svm/svm.c	Tue May 02 18:17:59 2006 +0100
    32.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu May 04 14:19:19 2006 +0100
    32.3 @@ -79,6 +79,8 @@ void svm_dump_regs(const char *from, str
    32.4  
    32.5  static void svm_relinquish_guest_resources(struct domain *d);
    32.6  
    32.7 +/* Host save area */
    32.8 +struct host_save_area *host_save_area[ NR_CPUS ] = {0};
    32.9  static struct asid_pool ASIDpool[NR_CPUS];
   32.10  
   32.11  /*
   32.12 @@ -185,11 +187,16 @@ static inline void svm_inject_exception(
   32.13  void stop_svm(void)
   32.14  {
   32.15      u32 eax, edx;    
   32.16 +    int cpu = smp_processor_id();
   32.17  
   32.18      /* We turn off the EFER_SVME bit. */
   32.19      rdmsr(MSR_EFER, eax, edx);
   32.20      eax &= ~EFER_SVME;
   32.21      wrmsr(MSR_EFER, eax, edx);
   32.22 + 
   32.23 +    /* release the HSA */
   32.24 +    free_host_save_area( host_save_area[ cpu ] );
   32.25 +    host_save_area[ cpu ] = NULL;
   32.26  
   32.27      printk("AMD SVM Extension is disabled.\n");
   32.28  }
   32.29 @@ -431,8 +438,11 @@ unsigned long svm_get_ctrl_reg(struct vc
   32.30  int start_svm(void)
   32.31  {
   32.32      u32 eax, ecx, edx;
   32.33 -    
   32.34 -    /* Xen does not fill x86_capability words except 0. */
   32.35 +    u32 phys_hsa_lo, phys_hsa_hi;   
   32.36 +    u64 phys_hsa;
   32.37 +    int cpu = smp_processor_id();
   32.38 + 
   32.39 +   /* Xen does not fill x86_capability words except 0. */
   32.40      ecx = cpuid_ecx(0x80000001);
   32.41      boot_cpu_data.x86_capability[5] = ecx;
   32.42      
   32.43 @@ -443,7 +453,14 @@ int start_svm(void)
   32.44      eax |= EFER_SVME;
   32.45      wrmsr(MSR_EFER, eax, edx);
   32.46      asidpool_init(smp_processor_id());    
   32.47 -    printk("AMD SVM Extension is enabled for cpu %d.\n", smp_processor_id());
   32.48 +    printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
   32.49 +
   32.50 +    /* Initialize the HSA for this core */
   32.51 +    host_save_area[ cpu ] = alloc_host_save_area();
   32.52 +    phys_hsa = (u64) virt_to_maddr( host_save_area[ cpu ] ); 
   32.53 +    phys_hsa_lo = (u32) phys_hsa;
   32.54 +    phys_hsa_hi = (u32) (phys_hsa >> 32);    
   32.55 +    wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
   32.56      
   32.57      /* Setup HVM interfaces */
   32.58      hvm_funcs.disable = stop_svm;
   32.59 @@ -546,20 +563,6 @@ void save_svm_cpu_user_regs(struct vcpu 
   32.60      ctxt->ds = vmcb->ds.sel;
   32.61  }
   32.62  
   32.63 -#if defined (__x86_64__)
   32.64 -void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v )
   32.65 -{
   32.66 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   32.67 -
   32.68 -    regs->rip    = vmcb->rip;
   32.69 -    regs->rsp    = vmcb->rsp;
   32.70 -    regs->rflags = vmcb->rflags;
   32.71 -    regs->cs     = vmcb->cs.sel;
   32.72 -    regs->ds     = vmcb->ds.sel;
   32.73 -    regs->es     = vmcb->es.sel;
   32.74 -    regs->ss     = vmcb->ss.sel;
   32.75 -}
   32.76 -#elif defined (__i386__)
   32.77  void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v)
   32.78  {
   32.79      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   32.80 @@ -571,11 +574,11 @@ void svm_store_cpu_user_regs(struct cpu_
   32.81      regs->ds     = vmcb->ds.sel;
   32.82      regs->es     = vmcb->es.sel;
   32.83      regs->ss     = vmcb->ss.sel;
   32.84 +    regs->fs     = vmcb->fs.sel;
   32.85 +    regs->gs     = vmcb->gs.sel;
   32.86  }
   32.87 -#endif
   32.88  
   32.89  /* XXX Use svm_load_cpu_guest_regs instead */
   32.90 -#if defined (__i386__)
   32.91  void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
   32.92  { 
   32.93      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   32.94 @@ -588,30 +591,17 @@ void svm_load_cpu_user_regs(struct vcpu 
   32.95      vmcb->rflags   = regs->eflags;
   32.96      vmcb->cs.sel   = regs->cs;
   32.97      vmcb->rip      = regs->eip;
   32.98 +
   32.99 +    vmcb->ds.sel   = regs->ds;
  32.100 +    vmcb->es.sel   = regs->es;
  32.101 +    vmcb->fs.sel   = regs->fs;
  32.102 +    vmcb->gs.sel   = regs->gs;
  32.103 +
  32.104      if (regs->eflags & EF_TF)
  32.105          *intercepts |= EXCEPTION_BITMAP_DB;
  32.106      else
  32.107          *intercepts &= ~EXCEPTION_BITMAP_DB;
  32.108  }
  32.109 -#else /* (__i386__) */
  32.110 -void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
  32.111 -{
  32.112 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
  32.113 -    u32 *intercepts = &v->arch.hvm_svm.vmcb->exception_intercepts;
  32.114 -    
  32.115 -    /* Write the guest register value into VMCB */
  32.116 -    vmcb->rax      = regs->rax;
  32.117 -    vmcb->ss.sel   = regs->ss;
  32.118 -    vmcb->rsp      = regs->rsp;   
  32.119 -    vmcb->rflags   = regs->rflags;
  32.120 -    vmcb->cs.sel   = regs->cs;
  32.121 -    vmcb->rip      = regs->rip;
  32.122 -    if (regs->rflags & EF_TF)
  32.123 -        *intercepts |= EXCEPTION_BITMAP_DB;
  32.124 -    else
  32.125 -        *intercepts &= ~EXCEPTION_BITMAP_DB;
  32.126 -}
  32.127 -#endif /* !(__i386__) */
  32.128  
  32.129  int svm_paging_enabled(struct vcpu *v)
  32.130  {
  32.131 @@ -735,10 +725,6 @@ static void svm_relinquish_guest_resourc
  32.132      {
  32.133          if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
  32.134              continue;
  32.135 -#if 0
  32.136 -        /* Memory leak by not freeing this. XXXKAF: *Why* is not per core?? */
  32.137 -        free_host_save_area(v->arch.hvm_svm.host_save_area);
  32.138 -#endif
  32.139  
  32.140          destroy_vmcb(&v->arch.hvm_svm);
  32.141          free_monitor_pagetable(v);
    33.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Tue May 02 18:17:59 2006 +0100
    33.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Thu May 04 14:19:19 2006 +0100
    33.3 @@ -36,9 +36,11 @@
    33.4  #include <xen/kernel.h>
    33.5  #include <xen/domain_page.h>
    33.6  
    33.7 +extern struct host_save_area *host_save_area[];
    33.8  extern int svm_dbg_on;
    33.9  extern int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
   33.10                                    int oldcore, int newcore);
   33.11 +extern void set_hsa_to_guest( struct arch_svm_struct *arch_svm );
   33.12  
   33.13  #define round_pgdown(_p) ((_p)&PAGE_MASK) /* coped from domain.c */
   33.14  
   33.15 @@ -309,8 +311,6 @@ int construct_vmcb(struct arch_svm_struc
   33.16  {
   33.17      int error;
   33.18      long rc=0;
   33.19 -    struct host_save_area *hsa = NULL;
   33.20 -    u64 phys_hsa;
   33.21  
   33.22      memset(arch_svm, 0, sizeof(struct arch_svm_struct));
   33.23  
   33.24 @@ -320,37 +320,10 @@ int construct_vmcb(struct arch_svm_struc
   33.25          goto err_out;
   33.26      }
   33.27  
   33.28 -    /* 
   33.29 -     * The following code is for allocating host_save_area.
   33.30 -     * Note: We either allocate a Host Save Area per core or per VCPU. 
   33.31 -     * However, we do not want a global data structure 
   33.32 -     * for HSA per core, we decided to implement a HSA for each VCPU. 
   33.33 -     * It will waste space since VCPU number is larger than core number. 
   33.34 -     * But before we find a better place for HSA for each core, we will 
   33.35 -     * stay will this solution.
   33.36 -     */
   33.37 -
   33.38 -    if (!(hsa = alloc_host_save_area())) 
   33.39 -    {
   33.40 -        printk("Failed to allocate Host Save Area\n");
   33.41 -        rc = -ENOMEM;
   33.42 -        goto err_out;
   33.43 -    }
   33.44 -
   33.45 -    phys_hsa = (u64) virt_to_maddr(hsa);
   33.46 -    arch_svm->host_save_area = hsa;
   33.47 -    arch_svm->host_save_pa   = phys_hsa;
   33.48 -
   33.49 +    /* update the HSA for the current Core */
   33.50 +    set_hsa_to_guest( arch_svm );
   33.51      arch_svm->vmcb_pa  = (u64) virt_to_maddr(arch_svm->vmcb);
   33.52  
   33.53 -    if ((error = load_vmcb(arch_svm, arch_svm->host_save_pa))) 
   33.54 -    {
   33.55 -        printk("construct_vmcb: load_vmcb failed: VMCB = %lx\n",
   33.56 -               (unsigned long) arch_svm->host_save_pa);
   33.57 -        rc = -EINVAL;         
   33.58 -        goto err_out;
   33.59 -    }
   33.60 -
   33.61      if ((error = construct_vmcb_controls(arch_svm))) 
   33.62      {
   33.63          printk("construct_vmcb: construct_vmcb_controls failed\n");
   33.64 @@ -458,19 +431,12 @@ void svm_do_launch(struct vcpu *v)
   33.65  }
   33.66  
   33.67  
   33.68 -int load_vmcb(struct arch_svm_struct *arch_svm, u64 phys_hsa) 
   33.69 +void set_hsa_to_guest( struct arch_svm_struct *arch_svm ) 
   33.70  {
   33.71 -    u32 phys_hsa_lo, phys_hsa_hi;
   33.72 -    
   33.73 -    phys_hsa_lo = (u32) phys_hsa;
   33.74 -    phys_hsa_hi = (u32) (phys_hsa >> 32);
   33.75 -    
   33.76 -    wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
   33.77 -    set_bit(ARCH_SVM_VMCB_LOADED, &arch_svm->flags); 
   33.78 -    return 0;
   33.79 +    arch_svm->host_save_area = host_save_area[ smp_processor_id() ];
   33.80 +    arch_svm->host_save_pa   = (u64)virt_to_maddr( arch_svm->host_save_area );
   33.81  }
   33.82  
   33.83 -
   33.84  /* 
   33.85   * Resume the guest.
   33.86   */
   33.87 @@ -481,6 +447,9 @@ void svm_do_resume(struct vcpu *v)
   33.88      struct hvm_time_info *time_info = &vpit->time_info;
   33.89  
   33.90      svm_stts(v);
   33.91 +
   33.92 +    /* make sure the HSA is set for the current core */
   33.93 +    set_hsa_to_guest( &v->arch.hvm_svm );
   33.94      
   33.95      /* pick up the elapsed PIT ticks and re-enable pit_timer */
   33.96      if ( time_info->first_injected ) {
    34.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Tue May 02 18:17:59 2006 +0100
    34.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu May 04 14:19:19 2006 +0100
    34.3 @@ -475,6 +475,45 @@ static void vmx_store_cpu_guest_regs(
    34.4          __vmptrld(virt_to_maddr(current->arch.hvm_vmx.vmcs));
    34.5  }
    34.6  
    34.7 +/*
    34.8 + * The VMX spec (section 4.3.1.2, Checks on Guest Segment
    34.9 + * Registers) says that virtual-8086 mode guests' segment
   34.10 + * base-address fields in the VMCS must be equal to their
   34.11 + * corresponding segment selector field shifted right by
   34.12 + * four bits upon vmentry.
   34.13 + *
   34.14 + * This function (called only for VM86-mode guests) fixes
   34.15 + * the bases to be consistent with the selectors in regs
   34.16 + * if they're not already.  Without this, we can fail the
   34.17 + * vmentry check mentioned above.
   34.18 + */
   34.19 +static void fixup_vm86_seg_bases(struct cpu_user_regs *regs)
   34.20 +{
   34.21 +    int err = 0;
   34.22 +    unsigned long base;
   34.23 +
   34.24 +    err |= __vmread(GUEST_ES_BASE, &base);
   34.25 +    if (regs->es << 4 != base)
   34.26 +        err |= __vmwrite(GUEST_ES_BASE, regs->es << 4);
   34.27 +    err |= __vmread(GUEST_CS_BASE, &base);
   34.28 +    if (regs->cs << 4 != base)
   34.29 +        err |= __vmwrite(GUEST_CS_BASE, regs->cs << 4);
   34.30 +    err |= __vmread(GUEST_SS_BASE, &base);
   34.31 +    if (regs->ss << 4 != base)
   34.32 +        err |= __vmwrite(GUEST_SS_BASE, regs->ss << 4);
   34.33 +    err |= __vmread(GUEST_DS_BASE, &base);
   34.34 +    if (regs->ds << 4 != base)
   34.35 +        err |= __vmwrite(GUEST_DS_BASE, regs->ds << 4);
   34.36 +    err |= __vmread(GUEST_FS_BASE, &base);
   34.37 +    if (regs->fs << 4 != base)
   34.38 +        err |= __vmwrite(GUEST_FS_BASE, regs->fs << 4);
   34.39 +    err |= __vmread(GUEST_GS_BASE, &base);
   34.40 +    if (regs->gs << 4 != base)
   34.41 +        err |= __vmwrite(GUEST_GS_BASE, regs->gs << 4);
   34.42 +
   34.43 +    BUG_ON(err);
   34.44 +}
   34.45 +
   34.46  void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
   34.47  {
   34.48      if ( v != current )
   34.49 @@ -511,6 +550,8 @@ void vmx_load_cpu_guest_regs(struct vcpu
   34.50          __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
   34.51      else
   34.52          __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
   34.53 +    if (regs->eflags & EF_VM)
   34.54 +        fixup_vm86_seg_bases(regs);
   34.55  
   34.56      __vmwrite(GUEST_CS_SELECTOR, regs->cs);
   34.57      __vmwrite(GUEST_RIP, regs->eip);
    35.1 --- a/xen/arch/x86/x86_32/mm.c	Tue May 02 18:17:59 2006 +0100
    35.2 +++ b/xen/arch/x86/x86_32/mm.c	Thu May 04 14:19:19 2006 +0100
    35.3 @@ -155,17 +155,10 @@ void subarch_init_memory(void)
    35.4       * 64-bit operations on them. Also, just for sanity, we assert the size
    35.5       * of the structure here.
    35.6       */
    35.7 -    if ( (offsetof(struct page_info, u.inuse._domain) != 
    35.8 -          (offsetof(struct page_info, count_info) + sizeof(u32))) ||
    35.9 -         ((offsetof(struct page_info, count_info) & 7) != 0) ||
   35.10 -         (sizeof(struct page_info) != 24) )
   35.11 -    {
   35.12 -        printk("Weird page_info layout (%ld,%ld,%d)\n",
   35.13 -               offsetof(struct page_info, count_info),
   35.14 -               offsetof(struct page_info, u.inuse._domain),
   35.15 -               sizeof(struct page_info));
   35.16 -        BUG();
   35.17 -    }
   35.18 +    BUILD_BUG_ON(offsetof(struct page_info, u.inuse._domain) != 
   35.19 +                 (offsetof(struct page_info, count_info) + sizeof(u32)));
   35.20 +    BUILD_BUG_ON((offsetof(struct page_info, count_info) & 7) != 0);
   35.21 +    BUILD_BUG_ON(sizeof(struct page_info) != 24);
   35.22  
   35.23      /* M2P table is mappable read-only by privileged domains. */
   35.24      for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
    36.1 --- a/xen/arch/x86/x86_64/mm.c	Tue May 02 18:17:59 2006 +0100
    36.2 +++ b/xen/arch/x86/x86_64/mm.c	Thu May 04 14:19:19 2006 +0100
    36.3 @@ -145,19 +145,11 @@ void subarch_init_memory(void)
    36.4       * count_info and domain fields must be adjacent, as we perform atomic
    36.5       * 64-bit operations on them.
    36.6       */
    36.7 -    if ( ((offsetof(struct page_info, u.inuse._domain) != 
    36.8 -           (offsetof(struct page_info, count_info) + sizeof(u32)))) ||
    36.9 -         ((offsetof(struct page_info, count_info) & 7) != 0) ||
   36.10 -         (sizeof(struct page_info) !=
   36.11 -          (32 + BITS_TO_LONGS(NR_CPUS)*sizeof(long))) )
   36.12 -    {
   36.13 -        printk("Weird page_info layout (%ld,%ld,%ld,%ld)\n",
   36.14 -               offsetof(struct page_info, count_info),
   36.15 -               offsetof(struct page_info, u.inuse._domain),
   36.16 -               sizeof(struct page_info),
   36.17 -               32 + BITS_TO_LONGS(NR_CPUS)*sizeof(long));
   36.18 -        for ( ; ; ) ;
   36.19 -    }
   36.20 +    BUILD_BUG_ON(offsetof(struct page_info, u.inuse._domain) != 
   36.21 +                 (offsetof(struct page_info, count_info) + sizeof(u32)));
   36.22 +    BUILD_BUG_ON((offsetof(struct page_info, count_info) & 7) != 0);
   36.23 +    BUILD_BUG_ON(sizeof(struct page_info) !=
   36.24 +                 (32 + BITS_TO_LONGS(NR_CPUS)*sizeof(long)));
   36.25  
   36.26      /* M2P table is mappable read-only by privileged domains. */
   36.27      for ( v  = RDWR_MPT_VIRT_START;