ia64/xen-unstable
changeset 8052:28a117c5ea94
Merged.
author:   emellor@leeni.uk.xensource.com
date:     Fri Nov 25 11:19:09 2005 +0000 (2005-11-25)
parents:  42474df9b248 9c81a3c5b33b
children: 421ea766aaa0 84cf3d018bea
files:
line diff
1.1 --- a/docs/misc/vtpm.txt Fri Nov 25 11:19:03 2005 +0000 1.2 +++ b/docs/misc/vtpm.txt Fri Nov 25 11:19:09 2005 +0000 1.3 @@ -73,7 +73,14 @@ information about the domain where the v 1.4 where the TPM backend has been compiled into - this has to be 1.5 domain 0 at the moment - and which TPM instance the user domain 1.6 is supposed to talk to. Note that each running VM must use a 1.7 -different instance and that using instance 0 is NOT allowed. 1.8 +different instance and that using instance 0 is NOT allowed. The 1.9 +instance parameter is taken as the desired instance number, but 1.10 +the actual instance number that is assigned to the virtual machine 1.11 +can be different. This is the case if for example that particular 1.12 +instance is already used by another virtual machine. The association 1.13 +of which TPM instance number is used by which virtual machine is 1.14 +kept in the file /etc/xen/vtpm.db. Associations are maintained by 1.15 +domain name and instance number. 1.16 1.17 Note: If you do not want TPM functionality for your user domain simply 1.18 leave out the 'vtpm' line in the configuration file.
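For illustration only (this example is not part of the changeset, and the instance and backend numbers are invented): with the behaviour documented above, a user domain asks for a vTPM through a single line in its configuration file, roughly of the form

    vtpm = [ 'instance=1, backend=0' ]

The '1' is only the preferred instance number; the hotplug machinery may assign a different one, and the association between domain name and the instance actually used is recorded in /etc/xen/vtpm.db as the new text describes.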
2.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c Fri Nov 25 11:19:03 2005 +0000 2.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c Fri Nov 25 11:19:09 2005 +0000 2.3 @@ -708,7 +708,7 @@ void __init pgtable_cache_init(void) 2.4 panic("pgtable_cache_init(): cannot create pmd cache"); 2.5 } 2.6 pgd_cache = kmem_cache_create("pgd", 2.7 -#if 0 /* How the heck _this_ works in native linux ??? */ 2.8 +#ifndef CONFIG_XEN 2.9 PTRS_PER_PGD*sizeof(pgd_t), 2.10 PTRS_PER_PGD*sizeof(pgd_t), 2.11 #else 2.12 @@ -717,7 +717,7 @@ void __init pgtable_cache_init(void) 2.13 #endif 2.14 0, 2.15 pgd_ctor, 2.16 - pgd_dtor); 2.17 + PTRS_PER_PMD == 1 ? pgd_dtor : NULL); 2.18 if (!pgd_cache) 2.19 panic("pgtable_cache_init(): Cannot create pgd cache"); 2.20 }
3.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c Fri Nov 25 11:19:03 2005 +0000 3.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c Fri Nov 25 11:19:09 2005 +0000 3.3 @@ -28,8 +28,6 @@ 3.4 #include <asm/hypervisor.h> 3.5 3.6 static void pgd_test_and_unpin(pgd_t *pgd); 3.7 -#define suspend_disable preempt_disable 3.8 -#define suspend_enable preempt_enable 3.9 3.10 void show_mem(void) 3.11 { 3.12 @@ -279,26 +277,31 @@ void pgd_ctor(void *pgd, kmem_cache_t *c 3.13 { 3.14 unsigned long flags; 3.15 3.16 -#ifdef CONFIG_X86_PAE 3.17 - /* Ensure pgd resides below 4GB. */ 3.18 - int rc = xen_create_contiguous_region((unsigned long)pgd, 0, 32); 3.19 - BUG_ON(rc); 3.20 + if (PTRS_PER_PMD > 1) { 3.21 +#ifdef CONFIG_XEN 3.22 + /* Ensure pgd resides below 4GB. */ 3.23 + int rc = xen_create_contiguous_region( 3.24 + (unsigned long)pgd, 0, 32); 3.25 + BUG_ON(rc); 3.26 #endif 3.27 - 3.28 - if (HAVE_SHARED_KERNEL_PMD) { 3.29 + if (HAVE_SHARED_KERNEL_PMD) 3.30 + memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD, 3.31 + swapper_pg_dir, sizeof(pgd_t)); 3.32 + } else { 3.33 + if (!HAVE_SHARED_KERNEL_PMD) 3.34 + spin_lock_irqsave(&pgd_lock, flags); 3.35 memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD, 3.36 swapper_pg_dir + USER_PTRS_PER_PGD, 3.37 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); 3.38 - return; 3.39 + memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t)); 3.40 + if (!HAVE_SHARED_KERNEL_PMD) { 3.41 + pgd_list_add(pgd); 3.42 + spin_unlock_irqrestore(&pgd_lock, flags); 3.43 + } 3.44 } 3.45 - 3.46 - memset(pgd, 0, PTRS_PER_PGD*sizeof(pgd_t)); 3.47 - 3.48 - spin_lock_irqsave(&pgd_lock, flags); 3.49 - pgd_list_add(pgd); 3.50 - spin_unlock_irqrestore(&pgd_lock, flags); 3.51 } 3.52 3.53 +/* never called when PTRS_PER_PMD > 1 */ 3.54 void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused) 3.55 { 3.56 unsigned long flags; /* can be called from interrupt context */ 3.57 @@ -315,7 +318,7 @@ void pgd_dtor(void *pgd, kmem_cache_t *c 3.58 3.59 pgd_t *pgd_alloc(struct mm_struct *mm) 3.60 { 3.61 - int i = 0; 3.62 + int i; 3.63 pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL); 3.64 3.65 pgd_test_and_unpin(pgd); 3.66 @@ -323,34 +326,31 @@ pgd_t *pgd_alloc(struct mm_struct *mm) 3.67 if (PTRS_PER_PMD == 1 || !pgd) 3.68 return pgd; 3.69 3.70 + for (i = 0; i < USER_PTRS_PER_PGD; ++i) { 3.71 + pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL); 3.72 + if (!pmd) 3.73 + goto out_oom; 3.74 + set_pgd(&pgd[i], __pgd(1 + __pa(pmd))); 3.75 + } 3.76 + 3.77 if (!HAVE_SHARED_KERNEL_PMD) { 3.78 - /* alloc and copy kernel pmd */ 3.79 unsigned long flags; 3.80 pgd_t *copy_pgd = pgd_offset_k(PAGE_OFFSET); 3.81 pud_t *copy_pud = pud_offset(copy_pgd, PAGE_OFFSET); 3.82 pmd_t *copy_pmd = pmd_offset(copy_pud, PAGE_OFFSET); 3.83 pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL); 3.84 - if (0 == pmd) 3.85 + ++i; 3.86 + if (!pmd) 3.87 goto out_oom; 3.88 3.89 spin_lock_irqsave(&pgd_lock, flags); 3.90 memcpy(pmd, copy_pmd, PAGE_SIZE); 3.91 - spin_unlock_irqrestore(&pgd_lock, flags); 3.92 make_lowmem_page_readonly(pmd); 3.93 set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd))); 3.94 + pgd_list_add(pgd); 3.95 + spin_unlock_irqrestore(&pgd_lock, flags); 3.96 } 3.97 3.98 - /* alloc user pmds */ 3.99 - for (i = 0; i < USER_PTRS_PER_PGD; ++i) { 3.100 - pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL); 3.101 - if (!pmd) 3.102 - goto out_oom; 3.103 - suspend_disable(); 3.104 - if (test_bit(PG_pinned, &virt_to_page(pgd)->flags)) 3.105 - make_lowmem_page_readonly(pmd); 3.106 - set_pgd(&pgd[i], __pgd(1 + __pa(pmd))); 3.107 - 
suspend_enable(); 3.108 - } 3.109 return pgd; 3.110 3.111 out_oom: 3.112 @@ -364,28 +364,25 @@ void pgd_free(pgd_t *pgd) 3.113 { 3.114 int i; 3.115 3.116 - suspend_disable(); 3.117 pgd_test_and_unpin(pgd); 3.118 3.119 /* in the PAE case user pgd entries are overwritten before usage */ 3.120 if (PTRS_PER_PMD > 1) { 3.121 for (i = 0; i < USER_PTRS_PER_PGD; ++i) { 3.122 pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1); 3.123 - set_pgd(&pgd[i], __pgd(0)); 3.124 - make_lowmem_page_writable(pmd); 3.125 kmem_cache_free(pmd_cache, pmd); 3.126 } 3.127 if (!HAVE_SHARED_KERNEL_PMD) { 3.128 + unsigned long flags; 3.129 pmd_t *pmd = (void *)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1); 3.130 - set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(0)); 3.131 + spin_lock_irqsave(&pgd_lock, flags); 3.132 + pgd_list_del(pgd); 3.133 + spin_unlock_irqrestore(&pgd_lock, flags); 3.134 make_lowmem_page_writable(pmd); 3.135 memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t)); 3.136 kmem_cache_free(pmd_cache, pmd); 3.137 } 3.138 } 3.139 - 3.140 - suspend_enable(); 3.141 - 3.142 /* in the non-PAE case, free_pgtables() clears user pgd entries */ 3.143 kmem_cache_free(pgd_cache, pgd); 3.144 } 3.145 @@ -510,9 +507,6 @@ static void pgd_walk(pgd_t *pgd_base, pg 3.146 3.147 static void __pgd_pin(pgd_t *pgd) 3.148 { 3.149 - /* PAE PGDs with no kernel PMD cannot be pinned. Bail right now. */ 3.150 - if ((PTRS_PER_PMD > 1) && pgd_none(pgd[USER_PTRS_PER_PGD])) 3.151 - return; 3.152 pgd_walk(pgd, PAGE_KERNEL_RO); 3.153 xen_pgd_pin(__pa(pgd)); 3.154 set_bit(PG_pinned, &virt_to_page(pgd)->flags); 3.155 @@ -527,10 +521,8 @@ static void __pgd_unpin(pgd_t *pgd) 3.156 3.157 static void pgd_test_and_unpin(pgd_t *pgd) 3.158 { 3.159 - suspend_disable(); 3.160 if (test_bit(PG_pinned, &virt_to_page(pgd)->flags)) 3.161 __pgd_unpin(pgd); 3.162 - suspend_enable(); 3.163 } 3.164 3.165 void mm_pin(struct mm_struct *mm)
4.1 --- a/linux-2.6-xen-sparse/drivers/char/tpm/Kconfig Fri Nov 25 11:19:03 2005 +0000 4.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/Kconfig Fri Nov 25 11:19:09 2005 +0000 4.3 @@ -18,15 +18,6 @@ config TCG_TPM 4.4 compile this driver as a module, choose M here; the module 4.5 will be called tpm. If unsure, say N. 4.6 4.7 -config TCG_TIS 4.8 - tristate "TPM Interface Specification 1.2 Interface" 4.9 - depends on TCG_TPM 4.10 - ---help--- 4.11 - If you have a TPM security chip that is compliant with the 4.12 - TCG TIS 1.2 TPM specification say Yes and it will be accessible 4.13 - from within Linux. To compile this driver as a module, choose 4.14 - M here; the module will be called tpm_tis. 4.15 - 4.16 config TCG_NSC 4.17 tristate "National Semiconductor TPM Interface" 4.18 depends on TCG_TPM
5.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h Fri Nov 25 11:19:03 2005 +0000 5.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h Fri Nov 25 11:19:09 2005 +0000 5.3 @@ -28,7 +28,7 @@ 5.4 #endif 5.5 5.6 typedef struct tpmif_st { 5.7 - struct list_head tpmif_list; 5.8 + struct list_head tpmif_list; 5.9 /* Unique identifier for this interface. */ 5.10 domid_t domid; 5.11 unsigned int handle; 5.12 @@ -83,6 +83,11 @@ extern int num_frontends; 5.13 5.14 #define MMAP_VADDR(t,_req) ((t)->mmap_vstart + ((_req) * PAGE_SIZE)) 5.15 5.16 +#ifndef TRUE 5.17 +#define TRUE 1 5.18 +#define FALSE 0 5.19 +#endif 5.20 + 5.21 #endif /* __TPMIF__BACKEND__COMMON_H__ */ 5.22 5.23 /*
6.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c Fri Nov 25 11:19:03 2005 +0000 6.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c Fri Nov 25 11:19:09 2005 +0000 6.3 @@ -127,6 +127,10 @@ tpmif_map(tpmif_t *tpmif, unsigned long 6.4 .u.bind_interdomain.remote_dom = tpmif->domid, 6.5 .u.bind_interdomain.remote_port = evtchn }; 6.6 6.7 + if (tpmif->irq) { 6.8 + return 0; 6.9 + } 6.10 + 6.11 if ((tpmif->tx_area = alloc_vm_area(PAGE_SIZE)) == NULL) 6.12 return -ENOMEM; 6.13 6.14 @@ -149,7 +153,6 @@ tpmif_map(tpmif_t *tpmif, unsigned long 6.15 6.16 tpmif->irq = bind_evtchn_to_irqhandler( 6.17 tpmif->evtchn, tpmif_be_int, 0, "tpmif-backend", tpmif); 6.18 - tpmif->status = CONNECTED; 6.19 tpmif->shmem_ref = shared_page; 6.20 tpmif->active = 1; 6.21
7.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c Fri Nov 25 11:19:03 2005 +0000 7.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c Fri Nov 25 11:19:09 2005 +0000 7.3 @@ -1,4 +1,5 @@ 7.4 /* Xenbus code for tpmif backend 7.5 + Copyright (C) 2005 IBM Corporation 7.6 Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au> 7.7 7.8 This program is free software; you can redistribute it and/or modify 7.9 @@ -29,72 +30,189 @@ struct backend_info 7.10 7.11 long int frontend_id; 7.12 long int instance; // instance of TPM 7.13 + u8 is_instance_set;// whether instance number has been set 7.14 7.15 /* watch front end for changes */ 7.16 struct xenbus_watch backend_watch; 7.17 + XenbusState frontend_state; 7.18 +}; 7.19 7.20 - struct xenbus_watch watch; 7.21 - char * frontpath; 7.22 -}; 7.23 +static void maybe_connect(struct backend_info *be); 7.24 +static void connect(struct backend_info *be); 7.25 +static int connect_ring(struct backend_info *be); 7.26 +static void backend_changed(struct xenbus_watch *watch, 7.27 + const char **vec, unsigned int len); 7.28 +static void frontend_changed(struct xenbus_device *dev, 7.29 + XenbusState frontend_state); 7.30 7.31 static int tpmback_remove(struct xenbus_device *dev) 7.32 { 7.33 struct backend_info *be = dev->data; 7.34 7.35 - if (be->watch.node) 7.36 - unregister_xenbus_watch(&be->watch); 7.37 - unregister_xenbus_watch(&be->backend_watch); 7.38 + if (be->backend_watch.node) { 7.39 + unregister_xenbus_watch(&be->backend_watch); 7.40 + kfree(be->backend_watch.node); 7.41 + be->backend_watch.node = NULL; 7.42 + } 7.43 + if (be->tpmif) { 7.44 + tpmif_put(be->tpmif); 7.45 + be->tpmif = NULL; 7.46 + } 7.47 + kfree(be); 7.48 + dev->data = NULL; 7.49 + return 0; 7.50 +} 7.51 7.52 - tpmif_vtpm_close(be->instance); 7.53 +static int tpmback_probe(struct xenbus_device *dev, 7.54 + const struct xenbus_device_id *id) 7.55 +{ 7.56 + int err; 7.57 + struct backend_info *be = kmalloc(sizeof(struct backend_info), 7.58 + GFP_KERNEL); 7.59 7.60 - if (be->tpmif) 7.61 - tpmif_put(be->tpmif); 7.62 + if (!be) { 7.63 + xenbus_dev_fatal(dev, -ENOMEM, 7.64 + "allocating backend structure"); 7.65 + return -ENOMEM; 7.66 + } 7.67 + 7.68 + memset(be, 0, sizeof(*be)); 7.69 + 7.70 + be->is_instance_set = FALSE; 7.71 + be->dev = dev; 7.72 + dev->data = be; 7.73 7.74 - kfree(be->frontpath); 7.75 - kfree(be); 7.76 + err = xenbus_watch_path2(dev, dev->nodename, 7.77 + "instance", &be->backend_watch, 7.78 + backend_changed); 7.79 + if (err) { 7.80 + goto fail; 7.81 + } 7.82 + 7.83 + err = xenbus_switch_state(dev, NULL, XenbusStateInitWait); 7.84 + if (err) { 7.85 + goto fail; 7.86 + } 7.87 return 0; 7.88 +fail: 7.89 + tpmback_remove(dev); 7.90 + return err; 7.91 } 7.92 7.93 7.94 -static void frontend_changed(struct xenbus_watch *watch, 7.95 - const char **vec, unsigned int len) 7.96 +static void backend_changed(struct xenbus_watch *watch, 7.97 + const char **vec, unsigned int len) 7.98 { 7.99 - unsigned long ringref; 7.100 - unsigned int evtchn; 7.101 - unsigned long ready = 1; 7.102 int err; 7.103 - struct xenbus_transaction *xbt; 7.104 + long instance; 7.105 struct backend_info *be 7.106 - = container_of(watch, struct backend_info, watch); 7.107 + = container_of(watch, struct backend_info, backend_watch); 7.108 + struct xenbus_device *dev = be->dev; 7.109 + 7.110 + err = xenbus_scanf(NULL, dev->nodename, 7.111 + "instance","%li", &instance); 7.112 + if (XENBUS_EXIST_ERR(err)) { 7.113 + return; 7.114 + } 7.115 7.116 - /* If other end is gone, delete ourself. 
*/ 7.117 - if (vec && !xenbus_exists(NULL, be->frontpath, "")) { 7.118 - xenbus_rm(NULL, be->dev->nodename, ""); 7.119 - device_unregister(&be->dev->dev); 7.120 + if (err != 1) { 7.121 + xenbus_dev_fatal(dev, err, "reading instance"); 7.122 + return; 7.123 + } 7.124 + 7.125 + if (be->is_instance_set != FALSE && be->instance != instance) { 7.126 + printk(KERN_WARNING 7.127 + "tpmback: changing instance (from %ld to %ld) " 7.128 + "not allowed.\n", 7.129 + be->instance, instance); 7.130 return; 7.131 } 7.132 7.133 + if (be->is_instance_set == FALSE) { 7.134 + be->tpmif = tpmif_find(dev->otherend_id, 7.135 + instance); 7.136 + if (IS_ERR(be->tpmif)) { 7.137 + err = PTR_ERR(be->tpmif); 7.138 + be->tpmif = NULL; 7.139 + xenbus_dev_fatal(dev,err,"creating block interface"); 7.140 + return; 7.141 + } 7.142 + be->instance = instance; 7.143 + be->is_instance_set = TRUE; 7.144 + 7.145 + /* 7.146 + * There's an unfortunate problem: 7.147 + * Sometimes after a suspend/resume the 7.148 + * state switch to XenbusStateInitialised happens 7.149 + * *before* I get to this point here. Since then 7.150 + * the connect_ring() must have failed (be->tpmif is 7.151 + * still NULL), I just call it here again indirectly. 7.152 + */ 7.153 + if (be->frontend_state == XenbusStateInitialised) { 7.154 + frontend_changed(dev, be->frontend_state); 7.155 + } 7.156 + } 7.157 +} 7.158 + 7.159 + 7.160 +static void frontend_changed(struct xenbus_device *dev, 7.161 + XenbusState frontend_state) 7.162 +{ 7.163 + struct backend_info *be = dev->data; 7.164 + int err; 7.165 + 7.166 + be->frontend_state = frontend_state; 7.167 + 7.168 + switch (frontend_state) { 7.169 + case XenbusStateInitialising: 7.170 + case XenbusStateConnected: 7.171 + break; 7.172 + 7.173 + case XenbusStateInitialised: 7.174 + err = connect_ring(be); 7.175 + if (err) { 7.176 + return; 7.177 + } 7.178 + maybe_connect(be); 7.179 + break; 7.180 + 7.181 + case XenbusStateClosing: 7.182 + xenbus_switch_state(dev, NULL, XenbusStateClosing); 7.183 + break; 7.184 + 7.185 + case XenbusStateClosed: 7.186 + /* 7.187 + * Notify the vTPM manager about the front-end 7.188 + * having left. 7.189 + */ 7.190 + tpmif_vtpm_close(be->instance); 7.191 + device_unregister(&be->dev->dev); 7.192 + break; 7.193 + 7.194 + case XenbusStateUnknown: 7.195 + case XenbusStateInitWait: 7.196 + default: 7.197 + xenbus_dev_fatal(dev, -EINVAL, 7.198 + "saw state %d at frontend", 7.199 + frontend_state); 7.200 + break; 7.201 + } 7.202 +} 7.203 + 7.204 + 7.205 + 7.206 +static void maybe_connect(struct backend_info *be) 7.207 +{ 7.208 + int err; 7.209 + 7.210 if (be->tpmif == NULL || be->tpmif->status == CONNECTED) 7.211 return; 7.212 7.213 - err = xenbus_gather(NULL, be->frontpath, 7.214 - "ring-ref", "%lu", &ringref, 7.215 - "event-channel", "%u", &evtchn, NULL); 7.216 - if (err) { 7.217 - xenbus_dev_error(be->dev, err, 7.218 - "reading %s/ring-ref and event-channel", 7.219 - be->frontpath); 7.220 - return; 7.221 - } 7.222 + connect(be); 7.223 7.224 - err = tpmif_map(be->tpmif, ringref, evtchn); 7.225 - if (err) { 7.226 - xenbus_dev_error(be->dev, err, 7.227 - "mapping shared-frame %lu port %u", 7.228 - ringref, evtchn); 7.229 - return; 7.230 - } 7.231 - 7.232 + /* 7.233 + * Notify the vTPM manager about a new front-end. 
7.234 + */ 7.235 err = tpmif_vtpm_open(be->tpmif, 7.236 be->frontend_id, 7.237 be->instance); 7.238 @@ -107,157 +225,75 @@ static void frontend_changed(struct xenb 7.239 */ 7.240 return; 7.241 } 7.242 +} 7.243 7.244 - /* 7.245 - * Tell the front-end that we are ready to go - 7.246 - * unless something bad happens 7.247 - */ 7.248 + 7.249 +static void connect(struct backend_info *be) 7.250 +{ 7.251 + struct xenbus_transaction *xbt; 7.252 + int err; 7.253 + struct xenbus_device *dev = be->dev; 7.254 + unsigned long ready = 1; 7.255 + 7.256 again: 7.257 xbt = xenbus_transaction_start(); 7.258 if (IS_ERR(xbt)) { 7.259 - xenbus_dev_error(be->dev, err, "starting transaction"); 7.260 + err = PTR_ERR(xbt); 7.261 + xenbus_dev_fatal(be->dev, err, "starting transaction"); 7.262 return; 7.263 } 7.264 7.265 err = xenbus_printf(xbt, be->dev->nodename, 7.266 "ready", "%lu", ready); 7.267 if (err) { 7.268 - xenbus_dev_error(be->dev, err, "writing 'ready'"); 7.269 + xenbus_dev_fatal(be->dev, err, "writing 'ready'"); 7.270 goto abort; 7.271 } 7.272 7.273 + err = xenbus_switch_state(dev, xbt, XenbusStateConnected); 7.274 + if (err) 7.275 + goto abort; 7.276 + 7.277 + be->tpmif->status = CONNECTED; 7.278 + 7.279 err = xenbus_transaction_end(xbt, 0); 7.280 if (err == -EAGAIN) 7.281 goto again; 7.282 if (err) { 7.283 - xenbus_dev_error(be->dev, err, "end of transaction"); 7.284 - goto abort; 7.285 + xenbus_dev_fatal(be->dev, err, "end of transaction"); 7.286 } 7.287 - 7.288 - xenbus_dev_ok(be->dev); 7.289 return; 7.290 abort: 7.291 xenbus_transaction_end(xbt, 1); 7.292 } 7.293 7.294 7.295 -static void backend_changed(struct xenbus_watch *watch, 7.296 - const char **vec, unsigned int len) 7.297 +static int connect_ring(struct backend_info *be) 7.298 { 7.299 - int err; 7.300 - long int instance; 7.301 - struct backend_info *be 7.302 - = container_of(watch, struct backend_info, backend_watch); 7.303 struct xenbus_device *dev = be->dev; 7.304 - 7.305 - err = xenbus_scanf(NULL, dev->nodename, "instance", "%li", &instance); 7.306 - if (XENBUS_EXIST_ERR(err)) 7.307 - return; 7.308 - if (err < 0) { 7.309 - xenbus_dev_error(dev, err, "reading 'instance' variable"); 7.310 - return; 7.311 - } 7.312 - 7.313 - if (be->instance != -1 && be->instance != instance) { 7.314 - printk(KERN_WARNING 7.315 - "cannot change the instance\n"); 7.316 - return; 7.317 - } 7.318 - be->instance = instance; 7.319 - 7.320 - if (be->tpmif == NULL) { 7.321 - unsigned int len = max(XS_WATCH_PATH, XS_WATCH_TOKEN) + 1; 7.322 - const char *vec[len]; 7.323 - 7.324 - be->tpmif = tpmif_find(be->frontend_id, 7.325 - instance); 7.326 - if (IS_ERR(be->tpmif)) { 7.327 - err = PTR_ERR(be->tpmif); 7.328 - be->tpmif = NULL; 7.329 - xenbus_dev_error(dev, err, "creating interface"); 7.330 - return; 7.331 - } 7.332 - 7.333 - vec[XS_WATCH_PATH] = be->frontpath; 7.334 - vec[XS_WATCH_TOKEN] = NULL; 7.335 - 7.336 - /* Pass in NULL node to skip exist test. 
*/ 7.337 - frontend_changed(&be->watch, vec, len); 7.338 - } 7.339 -} 7.340 - 7.341 - 7.342 -static int tpmback_probe(struct xenbus_device *dev, 7.343 - const struct xenbus_device_id *id) 7.344 -{ 7.345 - struct backend_info *be; 7.346 - char *frontend; 7.347 + unsigned long ring_ref; 7.348 + unsigned int evtchn; 7.349 int err; 7.350 7.351 - be = kmalloc(sizeof(*be), GFP_KERNEL); 7.352 - if (!be) { 7.353 - xenbus_dev_error(dev, -ENOMEM, "allocating backend structure"); 7.354 - err = -ENOMEM; 7.355 - } 7.356 - 7.357 - memset(be, 0, sizeof(*be)); 7.358 - 7.359 - frontend = NULL; 7.360 - err = xenbus_gather(NULL, dev->nodename, 7.361 - "frontend-id", "%li", &be->frontend_id, 7.362 - "frontend", NULL, &frontend, 7.363 - NULL); 7.364 - if (XENBUS_EXIST_ERR(err)) 7.365 - goto free_be; 7.366 - if (err < 0) { 7.367 + err = xenbus_gather(NULL, dev->otherend, 7.368 + "ring-ref", "%lu", &ring_ref, 7.369 + "event-channel", "%u", &evtchn, NULL); 7.370 + if (err) { 7.371 xenbus_dev_error(dev, err, 7.372 - "reading %s/frontend or frontend-id", 7.373 - dev->nodename); 7.374 - goto free_be; 7.375 - } 7.376 - if (strlen(frontend) == 0 || !xenbus_exists(NULL, frontend, "")) { 7.377 - /* If we can't get a frontend path and a frontend-id, 7.378 - * then our bus-id is no longer valid and we need to 7.379 - * destroy the backend device. 7.380 - */ 7.381 - err = -ENOENT; 7.382 - goto free_be; 7.383 + "reading %s/ring-ref and event-channel", 7.384 + dev->otherend); 7.385 + return err; 7.386 } 7.387 - 7.388 - be->dev = dev; 7.389 - be->backend_watch.node = dev->nodename; 7.390 - /* Implicitly calls backend_changed() once. */ 7.391 - be->backend_watch.callback = backend_changed; 7.392 - be->instance = -1; 7.393 - err = register_xenbus_watch(&be->backend_watch); 7.394 - if (err) { 7.395 - be->backend_watch.node = NULL; 7.396 - xenbus_dev_error(dev, err, "adding backend watch on %s", 7.397 - dev->nodename); 7.398 - goto free_be; 7.399 + if (be->tpmif != NULL) { 7.400 + err = tpmif_map(be->tpmif, ring_ref, evtchn); 7.401 + if (err) { 7.402 + xenbus_dev_error(dev, err, 7.403 + "mapping shared-frame %lu port %u", 7.404 + ring_ref, evtchn); 7.405 + return err; 7.406 + } 7.407 } 7.408 - 7.409 - be->frontpath = frontend; 7.410 - be->watch.node = be->frontpath; 7.411 - be->watch.callback = frontend_changed; 7.412 - err = register_xenbus_watch(&be->watch); 7.413 - if (err) { 7.414 - be->watch.node = NULL; 7.415 - xenbus_dev_error(dev, err, 7.416 - "adding frontend watch on %s", 7.417 - be->frontpath); 7.418 - goto free_be; 7.419 - } 7.420 - 7.421 - dev->data = be; 7.422 - return err; 7.423 - 7.424 -free_be: 7.425 - if (be->backend_watch.node) 7.426 - unregister_xenbus_watch(&be->backend_watch); 7.427 - kfree(frontend); 7.428 - kfree(be); 7.429 - return err; 7.430 + return 0; 7.431 } 7.432 7.433 7.434 @@ -273,6 +309,7 @@ static struct xenbus_driver tpmback = { 7.435 .ids = tpmback_ids, 7.436 .probe = tpmback_probe, 7.437 .remove = tpmback_remove, 7.438 + .otherend_changed = frontend_changed, 7.439 }; 7.440 7.441
8.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c Fri Nov 25 11:19:03 2005 +0000 8.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c Fri Nov 25 11:19:09 2005 +0000 8.3 @@ -73,7 +73,8 @@ static void tpmif_rx_action(unsigned lon 8.4 static void tpmif_connect(u16 evtchn, domid_t domid); 8.5 static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0); 8.6 static int tpm_allocate_buffers(struct tpm_private *tp); 8.7 -static void tpmif_set_connected_state(struct tpm_private *tp, int newstate); 8.8 +static void tpmif_set_connected_state(struct tpm_private *tp, 8.9 + u8 newstate); 8.10 static int tpm_xmit(struct tpm_private *tp, 8.11 const u8 * buf, size_t count, int userbuffer, 8.12 void *remember); 8.13 @@ -212,87 +213,46 @@ static int tpm_fe_send_upperlayer(const 8.14 XENBUS support code 8.15 **************************************************************/ 8.16 8.17 -static void watch_for_status(struct xenbus_watch *watch, 8.18 - const char **vec, unsigned int len) 8.19 -{ 8.20 - struct tpmfront_info *info; 8.21 - int err; 8.22 - unsigned long ready; 8.23 - struct tpm_private *tp = &my_private; 8.24 - const char *node = vec[XS_WATCH_PATH]; 8.25 - 8.26 - info = container_of(watch, struct tpmfront_info, watch); 8.27 - node += strlen(watch->node); 8.28 - 8.29 - if (tp->connected) 8.30 - return; 8.31 - 8.32 - err = xenbus_gather(NULL, watch->node, 8.33 - "ready", "%lu", &ready, 8.34 - NULL); 8.35 - if (err) { 8.36 - xenbus_dev_error(info->dev, err, "reading 'ready' field"); 8.37 - return; 8.38 - } 8.39 - 8.40 - tpmif_set_connected_state(tp, 1); 8.41 - 8.42 - xenbus_dev_ok(info->dev); 8.43 -} 8.44 - 8.45 - 8.46 static int setup_tpmring(struct xenbus_device *dev, 8.47 - struct tpmfront_info * info, 8.48 - domid_t backend_id) 8.49 + struct tpmfront_info * info) 8.50 { 8.51 tpmif_tx_interface_t *sring; 8.52 struct tpm_private *tp = &my_private; 8.53 int err; 8.54 - evtchn_op_t op = { 8.55 - .cmd = EVTCHNOP_alloc_unbound, 8.56 - .u.alloc_unbound.dom = DOMID_SELF, 8.57 - .u.alloc_unbound.remote_dom = backend_id } ; 8.58 8.59 sring = (void *)__get_free_page(GFP_KERNEL); 8.60 if (!sring) { 8.61 - xenbus_dev_error(dev, -ENOMEM, "allocating shared ring"); 8.62 + xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); 8.63 return -ENOMEM; 8.64 } 8.65 tp->tx = sring; 8.66 8.67 tpm_allocate_buffers(tp); 8.68 8.69 - err = gnttab_grant_foreign_access(backend_id, 8.70 - (virt_to_machine(tp->tx) >> PAGE_SHIFT), 8.71 - 0); 8.72 - 8.73 - if (err == -ENOSPC) { 8.74 + err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx)); 8.75 + if (err < 0) { 8.76 free_page((unsigned long)sring); 8.77 tp->tx = NULL; 8.78 - xenbus_dev_error(dev, err, "allocating grant reference"); 8.79 - return err; 8.80 + xenbus_dev_fatal(dev, err, "allocating grant reference"); 8.81 + goto fail; 8.82 } 8.83 info->ring_ref = err; 8.84 8.85 - err = HYPERVISOR_event_channel_op(&op); 8.86 - if (err) { 8.87 - gnttab_end_foreign_access(info->ring_ref, 0, 8.88 - (unsigned long)sring); 8.89 - tp->tx = NULL; 8.90 - xenbus_dev_error(dev, err, "allocating event channel"); 8.91 - return err; 8.92 - } 8.93 + err = xenbus_alloc_evtchn(dev, &tp->evtchn); 8.94 + if (err) 8.95 + goto fail; 8.96 8.97 - tpmif_connect(op.u.alloc_unbound.port, backend_id); 8.98 + tpmif_connect(tp->evtchn, dev->otherend_id); 8.99 8.100 return 0; 8.101 +fail: 8.102 + return err; 8.103 } 8.104 8.105 8.106 static void destroy_tpmring(struct tpmfront_info *info, struct tpm_private *tp) 8.107 { 8.108 - tpmif_set_connected_state(tp,0); 8.109 - 8.110 + 
tpmif_set_connected_state(tp, FALSE); 8.111 if ( tp->tx != NULL ) { 8.112 gnttab_end_foreign_access(info->ring_ref, 0, 8.113 (unsigned long)tp->tx); 8.114 @@ -308,42 +268,20 @@ static void destroy_tpmring(struct tpmfr 8.115 static int talk_to_backend(struct xenbus_device *dev, 8.116 struct tpmfront_info *info) 8.117 { 8.118 - char *backend; 8.119 - const char *message; 8.120 + const char *message = NULL; 8.121 int err; 8.122 - int backend_id; 8.123 struct xenbus_transaction *xbt; 8.124 8.125 - backend = NULL; 8.126 - err = xenbus_gather(NULL, dev->nodename, 8.127 - "backend-id", "%i", &backend_id, 8.128 - "backend", NULL, &backend, 8.129 - NULL); 8.130 - if (XENBUS_EXIST_ERR(err)) 8.131 - goto out; 8.132 - if (backend && strlen(backend) == 0) { 8.133 - err = -ENOENT; 8.134 - goto out; 8.135 - } 8.136 - if (err < 0) { 8.137 - xenbus_dev_error(dev, err, "reading %s/backend or backend-id", 8.138 - dev->nodename); 8.139 - goto out; 8.140 - } 8.141 - 8.142 - info->backend_id = backend_id; 8.143 - my_private.backend_id = backend_id; 8.144 - 8.145 - err = setup_tpmring(dev, info, backend_id); 8.146 + err = setup_tpmring(dev, info); 8.147 if (err) { 8.148 - xenbus_dev_error(dev, err, "setting up ring"); 8.149 + xenbus_dev_fatal(dev, err, "setting up ring"); 8.150 goto out; 8.151 } 8.152 8.153 again: 8.154 xbt = xenbus_transaction_start(); 8.155 if (IS_ERR(xbt)) { 8.156 - xenbus_dev_error(dev, err, "starting transaction"); 8.157 + xenbus_dev_fatal(dev, err, "starting transaction"); 8.158 goto destroy_tpmring; 8.159 } 8.160 8.161 @@ -361,36 +299,62 @@ again: 8.162 goto abort_transaction; 8.163 } 8.164 8.165 + err = xenbus_switch_state(dev, xbt, XenbusStateInitialised); 8.166 + if (err) { 8.167 + goto abort_transaction; 8.168 + } 8.169 + 8.170 err = xenbus_transaction_end(xbt, 0); 8.171 if (err == -EAGAIN) 8.172 goto again; 8.173 if (err) { 8.174 - xenbus_dev_error(dev, err, "completing transaction"); 8.175 + xenbus_dev_fatal(dev, err, "completing transaction"); 8.176 goto destroy_tpmring; 8.177 } 8.178 - 8.179 - info->watch.node = backend; 8.180 - info->watch.callback = watch_for_status; 8.181 - err = register_xenbus_watch(&info->watch); 8.182 - if (err) { 8.183 - xenbus_dev_error(dev, err, "registering watch on backend"); 8.184 - goto destroy_tpmring; 8.185 - } 8.186 - 8.187 - info->backend = backend; 8.188 - 8.189 return 0; 8.190 8.191 abort_transaction: 8.192 xenbus_transaction_end(xbt, 1); 8.193 - xenbus_dev_error(dev, err, "%s", message); 8.194 + if (message) 8.195 + xenbus_dev_error(dev, err, "%s", message); 8.196 destroy_tpmring: 8.197 destroy_tpmring(info, &my_private); 8.198 out: 8.199 - kfree(backend); 8.200 return err; 8.201 } 8.202 8.203 +/** 8.204 + * Callback received when the backend's state changes. 
8.205 + */ 8.206 +static void backend_changed(struct xenbus_device *dev, 8.207 + XenbusState backend_state) 8.208 +{ 8.209 + struct tpm_private *tp = &my_private; 8.210 + DPRINTK("\n"); 8.211 + 8.212 + switch (backend_state) { 8.213 + case XenbusStateInitialising: 8.214 + case XenbusStateInitWait: 8.215 + case XenbusStateInitialised: 8.216 + case XenbusStateUnknown: 8.217 + break; 8.218 + 8.219 + case XenbusStateConnected: 8.220 + tpmif_set_connected_state(tp, TRUE); 8.221 + break; 8.222 + 8.223 + case XenbusStateClosing: 8.224 + tpmif_set_connected_state(tp, FALSE); 8.225 + break; 8.226 + 8.227 + case XenbusStateClosed: 8.228 + if (tp->is_suspended == FALSE) { 8.229 + device_unregister(&dev->dev); 8.230 + } 8.231 + break; 8.232 + } 8.233 +} 8.234 + 8.235 8.236 static int tpmfront_probe(struct xenbus_device *dev, 8.237 const struct xenbus_device_id *id) 8.238 @@ -398,8 +362,6 @@ static int tpmfront_probe(struct xenbus_ 8.239 int err; 8.240 struct tpmfront_info *info; 8.241 int handle; 8.242 - int len = max(XS_WATCH_PATH, XS_WATCH_TOKEN) + 1; 8.243 - const char *vec[len]; 8.244 8.245 err = xenbus_scanf(NULL, dev->nodename, 8.246 "handle", "%i", &handle); 8.247 @@ -407,19 +369,19 @@ static int tpmfront_probe(struct xenbus_ 8.248 return err; 8.249 8.250 if (err < 0) { 8.251 - xenbus_dev_error(dev,err,"reading virtual-device"); 8.252 + xenbus_dev_fatal(dev,err,"reading virtual-device"); 8.253 return err; 8.254 } 8.255 8.256 info = kmalloc(sizeof(*info), GFP_KERNEL); 8.257 if (!info) { 8.258 - xenbus_dev_error(dev,err,"allocating info structure"); 8.259 + err = -ENOMEM; 8.260 + xenbus_dev_fatal(dev,err,"allocating info structure"); 8.261 return err; 8.262 } 8.263 memset(info, 0x0, sizeof(*info)); 8.264 8.265 info->dev = dev; 8.266 - info->handle = handle; 8.267 dev->data = info; 8.268 8.269 err = talk_to_backend(dev, info); 8.270 @@ -428,41 +390,33 @@ static int tpmfront_probe(struct xenbus_ 8.271 dev->data = NULL; 8.272 return err; 8.273 } 8.274 - 8.275 - vec[XS_WATCH_PATH] = info->watch.node; 8.276 - vec[XS_WATCH_TOKEN] = NULL; 8.277 - watch_for_status(&info->watch, vec, len); 8.278 - 8.279 return 0; 8.280 } 8.281 8.282 + 8.283 static int tpmfront_remove(struct xenbus_device *dev) 8.284 { 8.285 struct tpmfront_info *info = dev->data; 8.286 - if (info->backend) 8.287 - unregister_xenbus_watch(&info->watch); 8.288 8.289 destroy_tpmring(info, &my_private); 8.290 8.291 - kfree(info->backend); 8.292 kfree(info); 8.293 - 8.294 return 0; 8.295 } 8.296 8.297 static int 8.298 tpmfront_suspend(struct xenbus_device *dev) 8.299 { 8.300 - struct tpmfront_info *info = dev->data; 8.301 struct tpm_private *tp = &my_private; 8.302 u32 ctr = 0; 8.303 8.304 /* lock, so no app can send */ 8.305 down(&suspend_lock); 8.306 + tp->is_suspended = TRUE; 8.307 8.308 while (atomic_read(&tp->tx_busy) && ctr <= 25) { 8.309 - if ((ctr % 10) == 0) 8.310 - printk("INFO: Waiting for outstanding request.\n"); 8.311 + if ((ctr % 10) == 0) 8.312 + printk("TPM-FE [INFO]: Waiting for outstanding request.\n"); 8.313 /* 8.314 * Wait for a request to be responded to. 8.315 */ 8.316 @@ -474,15 +428,10 @@ tpmfront_suspend(struct xenbus_device *d 8.317 /* 8.318 * A temporary work-around. 
8.319 */ 8.320 - printk("WARNING: Resetting busy flag."); 8.321 + printk("TPM-FE [WARNING]: Resetting busy flag."); 8.322 atomic_set(&tp->tx_busy, 0); 8.323 } 8.324 8.325 - unregister_xenbus_watch(&info->watch); 8.326 - 8.327 - kfree(info->backend); 8.328 - info->backend = NULL; 8.329 - 8.330 return 0; 8.331 } 8.332 8.333 @@ -492,8 +441,6 @@ tpmfront_resume(struct xenbus_device *de 8.334 struct tpmfront_info *info = dev->data; 8.335 int err = talk_to_backend(dev, info); 8.336 8.337 - /* unlock, so apps can resume sending */ 8.338 - up(&suspend_lock); 8.339 8.340 return err; 8.341 } 8.342 @@ -530,12 +477,13 @@ static struct xenbus_driver tpmfront = { 8.343 .probe = tpmfront_probe, 8.344 .remove = tpmfront_remove, 8.345 .resume = tpmfront_resume, 8.346 + .otherend_changed = backend_changed, 8.347 .suspend = tpmfront_suspend, 8.348 }; 8.349 8.350 static void __init init_tpm_xenbus(void) 8.351 { 8.352 - xenbus_register_driver(&tpmfront); 8.353 + xenbus_register_frontend(&tpmfront); 8.354 } 8.355 8.356 8.357 @@ -628,12 +576,13 @@ tpm_xmit(struct tpm_private *tp, 8.358 spin_lock_irq(&tp->tx_lock); 8.359 8.360 if (unlikely(atomic_read(&tp->tx_busy))) { 8.361 - printk("There's an outstanding request/response on the way!\n"); 8.362 + printk("tpm_xmit: There's an outstanding request/response " 8.363 + "on the way!\n"); 8.364 spin_unlock_irq(&tp->tx_lock); 8.365 return -EBUSY; 8.366 } 8.367 8.368 - if (tp->connected != 1) { 8.369 + if (tp->is_connected != TRUE) { 8.370 spin_unlock_irq(&tp->tx_lock); 8.371 return -EIO; 8.372 } 8.373 @@ -705,24 +654,40 @@ static void tpmif_notify_upperlayer(stru 8.374 down(&upperlayer_lock); 8.375 8.376 if (upperlayer_tpmfe != NULL) { 8.377 - switch (tp->connected) { 8.378 - case 1: 8.379 - upperlayer_tpmfe->status(TPMFE_STATUS_CONNECTED); 8.380 - break; 8.381 - 8.382 - default: 8.383 - upperlayer_tpmfe->status(0); 8.384 - break; 8.385 + if (tp->is_connected) { 8.386 + upperlayer_tpmfe->status(TPMFE_STATUS_CONNECTED); 8.387 + } else { 8.388 + upperlayer_tpmfe->status(0); 8.389 } 8.390 } 8.391 up(&upperlayer_lock); 8.392 } 8.393 8.394 8.395 -static void tpmif_set_connected_state(struct tpm_private *tp, int newstate) 8.396 +static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected) 8.397 { 8.398 - if (newstate != tp->connected) { 8.399 - tp->connected = newstate; 8.400 + /* 8.401 + * Don't notify upper layer if we are in suspend mode and 8.402 + * should disconnect - assumption is that we will resume 8.403 + * The semaphore keeps apps from sending. 8.404 + */ 8.405 + if (is_connected == FALSE && tp->is_suspended == TRUE) { 8.406 + return; 8.407 + } 8.408 + 8.409 + /* 8.410 + * Unlock the semaphore if we are connected again 8.411 + * after being suspended - now resuming. 8.412 + * This also removes the suspend state. 8.413 + */ 8.414 + if (is_connected == TRUE && tp->is_suspended == TRUE) { 8.415 + tp->is_suspended = FALSE; 8.416 + /* unlock, so apps can resume sending */ 8.417 + up(&suspend_lock); 8.418 + } 8.419 + 8.420 + if (is_connected != tp->is_connected) { 8.421 + tp->is_connected = is_connected; 8.422 tpmif_notify_upperlayer(tp); 8.423 } 8.424 }
9.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h Fri Nov 25 11:19:03 2005 +0000 9.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h Fri Nov 25 11:19:09 2005 +0000 9.3 @@ -1,12 +1,17 @@ 9.4 #ifndef TPM_FRONT_H 9.5 #define TPM_FRONT_H 9.6 9.7 +#ifndef TRUE 9.8 +#define TRUE 1 9.9 +#define FALSE 0 9.10 +#endif 9.11 9.12 -struct tpm_private 9.13 -{ 9.14 +struct tpm_private { 9.15 tpmif_tx_interface_t *tx; 9.16 - unsigned int evtchn, irq; 9.17 - int connected; 9.18 + unsigned int evtchn; 9.19 + unsigned int irq; 9.20 + u8 is_connected; 9.21 + u8 is_suspended; 9.22 9.23 spinlock_t tx_lock; 9.24 9.25 @@ -16,25 +21,18 @@ struct tpm_private 9.26 void *tx_remember; 9.27 domid_t backend_id; 9.28 wait_queue_head_t wait_q; 9.29 + 9.30 }; 9.31 9.32 - 9.33 -struct tpmfront_info 9.34 -{ 9.35 - struct xenbus_watch watch; 9.36 - int handle; 9.37 +struct tpmfront_info { 9.38 struct xenbus_device *dev; 9.39 - char *backend; 9.40 int ring_ref; 9.41 - domid_t backend_id; 9.42 }; 9.43 9.44 - 9.45 -struct tx_buffer 9.46 -{ 9.47 +struct tx_buffer { 9.48 unsigned int size; // available space in data 9.49 unsigned int len; // used space in data 9.50 - unsigned char *data; // pointer to a page 9.51 + unsigned char *data; // pointer to a page 9.52 }; 9.53 9.54 #endif
10.1 --- a/patches/linux-2.6.12/pmd-shared.patch Fri Nov 25 11:19:03 2005 +0000 10.2 +++ b/patches/linux-2.6.12/pmd-shared.patch Fri Nov 25 11:19:09 2005 +0000 10.3 @@ -1,15 +1,3 @@ 10.4 -diff -urNpP linux-2.6.12/arch/i386/mm/init.c linux-2.6.12.new/arch/i386/mm/init.c 10.5 ---- linux-2.6.12/arch/i386/mm/init.c 2005-06-17 20:48:29.000000000 +0100 10.6 -+++ linux-2.6.12.new/arch/i386/mm/init.c 2005-07-11 16:28:09.778165582 +0100 10.7 -@@ -634,7 +634,7 @@ void __init pgtable_cache_init(void) 10.8 - PTRS_PER_PGD*sizeof(pgd_t), 10.9 - 0, 10.10 - pgd_ctor, 10.11 -- PTRS_PER_PMD == 1 ? pgd_dtor : NULL); 10.12 -+ pgd_dtor); 10.13 - if (!pgd_cache) 10.14 - panic("pgtable_cache_init(): Cannot create pgd cache"); 10.15 - } 10.16 diff -urNpP linux-2.6.12/arch/i386/mm/pageattr.c linux-2.6.12.new/arch/i386/mm/pageattr.c 10.17 --- linux-2.6.12/arch/i386/mm/pageattr.c 2005-06-17 20:48:29.000000000 +0100 10.18 +++ linux-2.6.12.new/arch/i386/mm/pageattr.c 2005-07-11 16:28:09.775165494 +0100 10.19 @@ -23,31 +11,45 @@ diff -urNpP linux-2.6.12/arch/i386/mm/pa 10.20 10.21 spin_lock_irqsave(&pgd_lock, flags); 10.22 diff -urNpP linux-2.6.12/arch/i386/mm/pgtable.c linux-2.6.12.new/arch/i386/mm/pgtable.c 10.23 ---- linux-2.6.12/arch/i386/mm/pgtable.c 2005-06-17 20:48:29.000000000 +0100 10.24 -+++ linux-2.6.12.new/arch/i386/mm/pgtable.c 2005-07-11 16:32:01.478023726 +0100 10.25 -@@ -199,14 +199,14 @@ void pgd_ctor(void *pgd, kmem_cache_t *c 10.26 +--- linux-2.6.12/arch/i386/mm/pgtable.c 2005-11-24 21:51:49.000000000 +0000 10.27 ++++ linux-2.6.12.new/arch/i386/mm/pgtable.c 2005-11-24 22:06:04.000000000 +0000 10.28 +@@ -199,19 +199,22 @@ void pgd_ctor(void *pgd, kmem_cache_t *c 10.29 { 10.30 unsigned long flags; 10.31 10.32 - if (PTRS_PER_PMD == 1) 10.33 -+ if (!HAVE_SHARED_KERNEL_PMD) 10.34 - spin_lock_irqsave(&pgd_lock, flags); 10.35 - 10.36 - memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD, 10.37 - swapper_pg_dir + USER_PTRS_PER_PGD, 10.38 - (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); 10.39 - 10.40 +- spin_lock_irqsave(&pgd_lock, flags); 10.41 +- 10.42 +- memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD, 10.43 +- swapper_pg_dir + USER_PTRS_PER_PGD, 10.44 +- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); 10.45 +- 10.46 - if (PTRS_PER_PMD > 1) 10.47 -+ if (HAVE_SHARED_KERNEL_PMD) 10.48 - return; 10.49 - 10.50 - pgd_list_add(pgd); 10.51 -@@ -214,11 +214,13 @@ void pgd_ctor(void *pgd, kmem_cache_t *c 10.52 - memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t)); 10.53 +- return; 10.54 +- 10.55 +- pgd_list_add(pgd); 10.56 +- spin_unlock_irqrestore(&pgd_lock, flags); 10.57 +- memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t)); 10.58 ++ if (PTRS_PER_PMD > 1) { 10.59 ++ if (HAVE_SHARED_KERNEL_PMD) 10.60 ++ memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD, 10.61 ++ swapper_pg_dir, sizeof(pgd_t)); 10.62 ++ } else { 10.63 ++ if (!HAVE_SHARED_KERNEL_PMD) 10.64 ++ spin_lock_irqsave(&pgd_lock, flags); 10.65 ++ memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD, 10.66 ++ swapper_pg_dir + USER_PTRS_PER_PGD, 10.67 ++ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); 10.68 ++ memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t)); 10.69 ++ if (!HAVE_SHARED_KERNEL_PMD) { 10.70 ++ pgd_list_add(pgd); 10.71 ++ spin_unlock_irqrestore(&pgd_lock, flags); 10.72 ++ } 10.73 ++ } 10.74 } 10.75 10.76 --/* never called when PTRS_PER_PMD > 1 */ 10.77 - void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused) 10.78 + /* never called when PTRS_PER_PMD > 1 */ 10.79 +@@ -219,6 +222,9 @@ void pgd_dtor(void *pgd, kmem_cache_t *c 10.80 { 10.81 unsigned long flags; /* can be 
called from interrupt context */ 10.82 10.83 @@ -57,38 +59,32 @@ diff -urNpP linux-2.6.12/arch/i386/mm/pg 10.84 spin_lock_irqsave(&pgd_lock, flags); 10.85 pgd_list_del(pgd); 10.86 spin_unlock_irqrestore(&pgd_lock, flags); 10.87 -@@ -226,12 +228,29 @@ void pgd_dtor(void *pgd, kmem_cache_t *c 10.88 - 10.89 - pgd_t *pgd_alloc(struct mm_struct *mm) 10.90 - { 10.91 -- int i; 10.92 -+ int i = 0; 10.93 - pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL); 10.94 - 10.95 - if (PTRS_PER_PMD == 1 || !pgd) 10.96 - return pgd; 10.97 - 10.98 +@@ -238,6 +244,24 @@ pgd_t *pgd_alloc(struct mm_struct *mm) 10.99 + goto out_oom; 10.100 + set_pgd(&pgd[i], __pgd(1 + __pa(pmd))); 10.101 + } 10.102 ++ 10.103 + if (!HAVE_SHARED_KERNEL_PMD) { 10.104 -+ /* alloc and copy kernel pmd */ 10.105 + unsigned long flags; 10.106 + pgd_t *copy_pgd = pgd_offset_k(PAGE_OFFSET); 10.107 + pud_t *copy_pud = pud_offset(copy_pgd, PAGE_OFFSET); 10.108 + pmd_t *copy_pmd = pmd_offset(copy_pud, PAGE_OFFSET); 10.109 + pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL); 10.110 -+ if (0 == pmd) 10.111 ++ ++i; 10.112 ++ if (!pmd) 10.113 + goto out_oom; 10.114 + 10.115 + spin_lock_irqsave(&pgd_lock, flags); 10.116 + memcpy(pmd, copy_pmd, PAGE_SIZE); 10.117 ++ set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd))); 10.118 ++ pgd_list_add(pgd); 10.119 + spin_unlock_irqrestore(&pgd_lock, flags); 10.120 -+ set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd))); 10.121 + } 10.122 + 10.123 -+ /* alloc user pmds */ 10.124 - for (i = 0; i < USER_PTRS_PER_PGD; ++i) { 10.125 - pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL); 10.126 - if (!pmd) 10.127 -@@ -252,9 +271,16 @@ void pgd_free(pgd_t *pgd) 10.128 + return pgd; 10.129 + 10.130 + out_oom: 10.131 +@@ -252,9 +276,21 @@ void pgd_free(pgd_t *pgd) 10.132 int i; 10.133 10.134 /* in the PAE case user pgd entries are overwritten before usage */ 10.135 @@ -101,7 +97,12 @@ diff -urNpP linux-2.6.12/arch/i386/mm/pg 10.136 + kmem_cache_free(pmd_cache, pmd); 10.137 + } 10.138 + if (!HAVE_SHARED_KERNEL_PMD) { 10.139 ++ unsigned long flags; 10.140 + pmd_t *pmd = (void *)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1); 10.141 ++ spin_lock_irqsave(&pgd_lock, flags); 10.142 ++ pgd_list_del(pgd); 10.143 ++ spin_unlock_irqrestore(&pgd_lock, flags); 10.144 ++ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t)); 10.145 + kmem_cache_free(pmd_cache, pmd); 10.146 + } 10.147 + }
11.1 --- a/tools/examples/Makefile Fri Nov 25 11:19:03 2005 +0000 11.2 +++ b/tools/examples/Makefile Fri Nov 25 11:19:09 2005 +0000 11.3 @@ -26,9 +26,10 @@ XEN_SCRIPTS += network-route vif-route 11.4 XEN_SCRIPTS += network-nat vif-nat 11.5 XEN_SCRIPTS += block 11.6 XEN_SCRIPTS += block-enbd block-nbd 11.7 +XEN_SCRIPTS += vtpm 11.8 XEN_SCRIPT_DATA = xen-script-common.sh 11.9 XEN_SCRIPT_DATA += xen-hotplug-common.sh xen-network-common.sh vif-common.sh 11.10 -XEN_SCRIPT_DATA += block-common.sh 11.11 +XEN_SCRIPT_DATA += block-common.sh vtpm-common.sh 11.12 11.13 XEN_HOTPLUG_DIR = /etc/hotplug 11.14 XEN_HOTPLUG_SCRIPTS = xen-backend.agent
12.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 12.2 +++ b/tools/examples/vtpm Fri Nov 25 11:19:09 2005 +0000 12.3 @@ -0,0 +1,23 @@ 12.4 +#!/bin/sh 12.5 + 12.6 +dir=$(dirname "$0") 12.7 +. "$dir/vtpm-common.sh" 12.8 + 12.9 + 12.10 +case "$command" in 12.11 + online | offline) 12.12 + exit 0 12.13 + ;; 12.14 +esac 12.15 + 12.16 +case "$command" in 12.17 + add) 12.18 + vtpm_create_instance 12.19 + ;; 12.20 + remove) 12.21 + vtpm_remove_instance 12.22 + ;; 12.23 +esac 12.24 + 12.25 +log debug "Successful vTPM operation '$command'." 12.26 +success
13.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 13.2 +++ b/tools/examples/vtpm-common.sh Fri Nov 25 11:19:09 2005 +0000 13.3 @@ -0,0 +1,281 @@ 13.4 +# 13.5 +# Copyright (c) 2005 IBM Corporation 13.6 +# Copyright (c) 2005 XenSource Ltd. 13.7 +# 13.8 +# This library is free software; you can redistribute it and/or 13.9 +# modify it under the terms of version 2.1 of the GNU Lesser General Public 13.10 +# License as published by the Free Software Foundation. 13.11 +# 13.12 +# This library is distributed in the hope that it will be useful, 13.13 +# but WITHOUT ANY WARRANTY; without even the implied warranty of 13.14 +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13.15 +# Lesser General Public License for more details. 13.16 +# 13.17 +# You should have received a copy of the GNU Lesser General Public 13.18 +# License along with this library; if not, write to the Free Software 13.19 +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 13.20 +# 13.21 + 13.22 +dir=$(dirname "$0") 13.23 +. "$dir/xen-hotplug-common.sh" 13.24 + 13.25 +findCommand "$@" 13.26 +if [ "$command" != "online" ] && 13.27 + [ "$command" != "offline" ] && 13.28 + [ "$command" != "add" ] && 13.29 + [ "$command" != "remove" ] 13.30 +then 13.31 + log err "Invalid command: $command" 13.32 + exit 1 13.33 +fi 13.34 + 13.35 + 13.36 +XENBUS_PATH="${XENBUS_PATH:?}" 13.37 + 13.38 + 13.39 +VTPMDB="/etc/xen/vtpm.db" 13.40 + 13.41 +#In the vtpm-impl file some commands should be defined: 13.42 +# vtpm_create, vtpm_setup, vtpm_reset, etc. (see below) 13.43 +#This should be indicated by setting VTPM_IMPL_DEFINED. 13.44 +if [ -r "$dir/vtpm-impl" ]; then 13.45 + . "$dir/vtpm-impl" 13.46 +fi 13.47 + 13.48 +if [ -z "$VTPM_IMPL_DEFINED" ]; then 13.49 + function vtpm_create () { 13.50 + true 13.51 + } 13.52 + function vtpm_setup() { 13.53 + true 13.54 + } 13.55 + function vtpm_reset() { 13.56 + true 13.57 + } 13.58 + function vtpm_suspend() { 13.59 + true 13.60 + } 13.61 + function vtpm_resume() { 13.62 + true 13.63 + } 13.64 +fi 13.65 + 13.66 +#Find the instance number for the vtpm given the name of the domain 13.67 +# Parameters 13.68 +# - vmname : the name of the vm 13.69 +# Return value 13.70 +# Returns '0' if instance number could not be found, otherwise 13.71 +# it returns the instance number in the variable 'instance' 13.72 +function find_instance () { 13.73 + local vmname=$1 13.74 + local ret=0 13.75 + instance=`cat $VTPMDB | \ 13.76 + awk -vvmname=$vmname \ 13.77 + '{ \ 13.78 + if ( 1 != index($1,"#")) { \ 13.79 + if ( $1 == vmname ) { \ 13.80 + print $2; \ 13.81 + exit; \ 13.82 + } \ 13.83 + } \ 13.84 + }'` 13.85 + if [ "$instance" != "" ]; then 13.86 + ret=1 13.87 + fi 13.88 + return $ret 13.89 +} 13.90 + 13.91 + 13.92 +# Check whether a particular instance number is still available 13.93 +# returns '1' if it is available 13.94 +function is_free_instancenum () { 13.95 + local instance=$1 13.96 + local avail=1 13.97 + 13.98 + #Allowed instance number range: 1-255 13.99 + if [ $instance -eq 0 -o $instance -gt 255 ]; then 13.100 + avail=0 13.101 + else 13.102 + instances=`cat $VTPMDB | \ 13.103 + gawk \ 13.104 + '{ \ 13.105 + if (1 != index($1,"#")) { \ 13.106 + printf("%s ",$2); \ 13.107 + } \ 13.108 + }'` 13.109 + for i in $instances; do 13.110 + if [ $i -eq $instance ]; then 13.111 + avail=0 13.112 + break 13.113 + fi 13.114 + done 13.115 + fi 13.116 + return $avail 13.117 +} 13.118 + 13.119 + 13.120 +# Get an available instance number given the database 13.121 +# Returns an unused instance 
number 13.122 +function get_free_instancenum () { 13.123 + local ctr 13.124 + local instances 13.125 + local don 13.126 + instances=`cat $VTPMDB | \ 13.127 + gawk \ 13.128 + '{ \ 13.129 + if (1 != index($1,"#")) { \ 13.130 + printf("%s ",$2); \ 13.131 + } \ 13.132 + }'` 13.133 + ctr=1 13.134 + don=0 13.135 + while [ $don -eq 0 ]; do 13.136 + local found 13.137 + found=0 13.138 + for i in $instances; do 13.139 + if [ $i -eq $ctr ]; then 13.140 + found=1; 13.141 + break; 13.142 + fi 13.143 + done 13.144 + 13.145 + if [ $found -eq 0 ]; then 13.146 + don=1 13.147 + break 13.148 + fi 13.149 + let ctr=ctr+1 13.150 + done 13.151 + let instance=$ctr 13.152 +} 13.153 + 13.154 + 13.155 +# Add a domain name and instance number to the DB file 13.156 +function add_instance () { 13.157 + local vmname=$1 13.158 + local inst=$2 13.159 + 13.160 + if [ ! -f $VTPMDB ]; then 13.161 + echo "#Database for VM to vTPM association" > $VTPMDB 13.162 + echo "#1st column: domain name" >> $VTPMDB 13.163 + echo "#2nd column: TPM instance number" >> $VTPMDB 13.164 + fi 13.165 + validate_entry $vmname $inst 13.166 + if [ $? -eq 0 ]; then 13.167 + echo "$vmname $inst" >> $VTPMDB 13.168 + fi 13.169 +} 13.170 + 13.171 + 13.172 +#Validate whether an entry is the same as passed to this 13.173 +#function 13.174 +function validate_entry () { 13.175 + local rc=0 13.176 + local vmname=$1 13.177 + local inst=$2 13.178 + local res 13.179 + res=`cat $VTPMDB | \ 13.180 + gawk -vvmname=$vmname \ 13.181 + -vinst=$inst \ 13.182 + '{ \ 13.183 + if ( 1 == index($1,"#")) {\ 13.184 + } else \ 13.185 + if ( $1 == vmname && \ 13.186 + $2 == inst) { \ 13.187 + printf("1"); \ 13.188 + exit; \ 13.189 + } else \ 13.190 + if ( $1 == vmname || \ 13.191 + $2 == inst) { \ 13.192 + printf("2"); \ 13.193 + exit; \ 13.194 + } \ 13.195 + }'` 13.196 + 13.197 + if [ "$res" == "1" ]; then 13.198 + let rc=1 13.199 + elif [ "$res" == "2" ]; then 13.200 + let rc=2 13.201 + fi 13.202 + return $rc 13.203 +} 13.204 + 13.205 + 13.206 +#Remove an entry from the vTPM database given its domain name 13.207 +function remove_entry () { 13.208 + local vmname=$1 13.209 + local VTPMDB_TMP="$VTPMDB".tmp 13.210 + `cat $VTPMDB | \ 13.211 + gawk -vvmname=$vmname \ 13.212 + '{ \ 13.213 + if ( $1 != vmname ) { \ 13.214 + print $0; \ 13.215 + } \ 13.216 + '} > $VTPMDB_TMP` 13.217 + if [ -e $VTPMDB_TMP ]; then 13.218 + mv -f $VTPMDB_TMP $VTPMDB 13.219 + else 13.220 + log err "Error creating temporary file '$VTPMDB_TMP'." 13.221 + fi 13.222 +} 13.223 + 13.224 + 13.225 +#Create a vTPM instance 13.226 +# If no entry in the TPM database is found, the instance is 13.227 +# created and an entry added to the database. 13.228 +function vtpm_create_instance () { 13.229 + local domname=$(xenstore_read "$XENBUS_PATH"/domain) 13.230 + local res 13.231 + set +e 13.232 + find_instance $domname 13.233 + res=$? 13.234 + if [ $res -eq 0 ]; then 13.235 + #Try to give the preferred instance to the domain 13.236 + instance=$(xenstore_read "$XENBUS_PATH"/pref_instance) 13.237 + if [ "$instance" != "" ]; then 13.238 + is_free_instancenum $instance 13.239 + res=$? 
13.240 + if [ $res -eq 0 ]; then 13.241 + get_free_instancenum 13.242 + fi 13.243 + else 13.244 + get_free_instancenum 13.245 + fi 13.246 + add_instance $domname $instance 13.247 + if [ "$REASON" == "create" ]; then 13.248 + vtpm_create $instance 13.249 + elif [ "$REASON" == "hibernate" ]; then 13.250 + vtpm_resume $instance $domname 13.251 + else 13.252 + #default case for 'now' 13.253 + vtpm_create $instance 13.254 + fi 13.255 + fi 13.256 + if [ "$REASON" == "create" ]; then 13.257 + vtpm_reset $instance 13.258 + elif [ "$REASON" == "hibernate" ]; then 13.259 + vtpm_setup $instance 13.260 + else 13.261 + #default case for 'now' 13.262 + vtpm_reset $instance 13.263 + fi 13.264 + xenstore_write $XENBUS_PATH/instance $instance 13.265 + set -e 13.266 +} 13.267 + 13.268 + 13.269 +#Remove an instance 13.270 +function vtpm_remove_instance () { 13.271 + local domname=$(xenstore_read "$XENBUS_PATH"/domain) 13.272 + set +e 13.273 + find_instance $domname 13.274 + res=$? 13.275 + if [ $res -eq 0 ]; then 13.276 + #Something is really wrong with the DB 13.277 + log err "vTPM DB file $VTPMDB has no entry for '$domname'" 13.278 + else 13.279 + if [ "$REASON" == "hibernate" ]; then 13.280 + vtpm_suspend $instance 13.281 + fi 13.282 + fi 13.283 + set -e 13.284 +}
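For illustration only (the domain names and numbers are invented, not part of the changeset): with the add_instance() function above, /etc/xen/vtpm.db ends up looking like

    #Database for VM to vTPM association
    #1st column: domain name
    #2nd column: TPM instance number
    vm001 1
    vm002 2

find_instance() and is_free_instancenum() parse exactly this two-column format, skipping the comment lines.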
14.1 --- a/tools/examples/xen-backend.agent Fri Nov 25 11:19:03 2005 +0000 14.2 +++ b/tools/examples/xen-backend.agent Fri Nov 25 11:19:09 2005 +0000 14.3 @@ -6,6 +6,9 @@ case "$XENBUS_TYPE" in 14.4 vbd) 14.5 /etc/xen/scripts/block "$ACTION" 14.6 ;; 14.7 + vtpm) 14.8 + /etc/xen/scripts/vtpm "$ACTION" 14.9 + ;; 14.10 vif) 14.11 [ -n "$script" ] && $script "$ACTION" 14.12 ;;
15.1 --- a/tools/examples/xen-backend.rules Fri Nov 25 11:19:03 2005 +0000 15.2 +++ b/tools/examples/xen-backend.rules Fri Nov 25 11:19:09 2005 +0000 15.3 @@ -1,4 +1,5 @@ 15.4 SUBSYSTEM=="xen-backend", KERNEL=="vbd*", RUN+="/etc/xen/scripts/block $env{ACTION}" 15.5 +SUBSYSTEM=="xen-backend", KERNEL=="vtpm*", RUN+="/etc/xen/scripts/vtpm $env{ACTION}" 15.6 SUBSYSTEM=="xen-backend", KERNEL=="vif*", ACTION=="online", RUN+="$env{script} online" 15.7 SUBSYSTEM=="xen-backend", KERNEL=="vif*", ACTION=="offline", RUN+="$env{script} offline" 15.8 SUBSYSTEM=="xen-backend", ACTION=="remove", RUN+="/bin/bash -c '/usr/bin/xenstore-rm -t $$(/usr/bin/xenstore-read $env{XENBUS_PATH}/frontend)'"
16.1 --- a/tools/ioemu/exec.c Fri Nov 25 11:19:03 2005 +0000 16.2 +++ b/tools/ioemu/exec.c Fri Nov 25 11:19:09 2005 +0000 16.3 @@ -262,13 +262,24 @@ void cpu_register_physical_memory(target 16.4 unsigned long size, 16.5 unsigned long phys_offset) 16.6 { 16.7 - if (mmio_cnt == MAX_MMIO) { 16.8 - fprintf(logfile, "too many mmio regions\n"); 16.9 - exit(-1); 16.10 + int i; 16.11 + 16.12 + for (i = 0; i < mmio_cnt; i++) { 16.13 + if(mmio[i].start == start_addr) { 16.14 + mmio[i].io_index = phys_offset; 16.15 + mmio[i].size = size; 16.16 + return; 16.17 } 16.18 - mmio[mmio_cnt].io_index = phys_offset; 16.19 - mmio[mmio_cnt].start = start_addr; 16.20 - mmio[mmio_cnt++].size = size; 16.21 + } 16.22 + 16.23 + if (mmio_cnt == MAX_MMIO) { 16.24 + fprintf(logfile, "too many mmio regions\n"); 16.25 + exit(-1); 16.26 + } 16.27 + 16.28 + mmio[mmio_cnt].io_index = phys_offset; 16.29 + mmio[mmio_cnt].start = start_addr; 16.30 + mmio[mmio_cnt++].size = size; 16.31 } 16.32 16.33 /* mem_read and mem_write are arrays of functions containing the
17.1 --- a/tools/python/xen/xend/image.py Fri Nov 25 11:19:03 2005 +0000 17.2 +++ b/tools/python/xen/xend/image.py Fri Nov 25 11:19:09 2005 +0000 17.3 @@ -293,7 +293,7 @@ class VmxImageHandler(ImageHandler): 17.4 ret.append("-bridge") 17.5 ret.append("%s" % bridge) 17.6 if name == 'vtpm': 17.7 - instance = sxp.child_value(info, 'instance') 17.8 + instance = sxp.child_value(info, 'pref_instance') 17.9 ret.append("-instance") 17.10 ret.append("%s" % instance) 17.11 return ret
18.1 --- a/tools/python/xen/xend/server/tpmif.py Fri Nov 25 11:19:03 2005 +0000 18.2 +++ b/tools/python/xen/xend/server/tpmif.py Fri Nov 25 11:19:09 2005 +0000 18.3 @@ -38,10 +38,10 @@ class TPMifController(DevController): 18.4 def getDeviceDetails(self, config): 18.5 """@see DevController.getDeviceDetails""" 18.6 18.7 - devid = int(sxp.child_value(config, 'instance', '0')) 18.8 + devid = int(sxp.child_value(config, 'pref_instance', '0')) 18.9 log.info("The domain has a TPM with instance %d." % devid) 18.10 18.11 - back = { 'instance' : "%i" % devid } 18.12 + back = { 'pref_instance' : "%i" % devid } 18.13 front = { 'handle' : "%i" % devid } 18.14 18.15 return (devid, back, front)
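For illustration only (the key names come from the code in this changeset; the surrounding xenstore path layout is the usual backend/frontend device path and is not spelled out here): the split of work between xend and the hotplug script shows up as three keys under the backend device's xenstore path:

    pref_instance  - preferred instance number, written by xend (getDeviceDetails above)
    instance       - instance actually assigned, written by the vtpm hotplug script (vtpm-common.sh)
    domain         - domain name, read by the hotplug script to update /etc/xen/vtpm.db

The tpmback driver's backend_watch (xenbus.c above) fires on the 'instance' key, while the frontend path carries the 'handle' key from the front dictionary above.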
19.1 --- a/tools/python/xen/xm/create.py Fri Nov 25 11:19:03 2005 +0000
19.2 +++ b/tools/python/xen/xm/create.py Fri Nov 25 11:19:09 2005 +0000
19.3 @@ -220,11 +220,9 @@ gopts.var('netif', val='no|yes',
19.4            fn=set_bool, default=0,
19.5            use="Make the domain a network interface backend.")
19.6
19.7  -gopts.var('tpmif', val='frontend=DOM',
19.8  -          fn=append_value, default=[],
19.9  -          use="""Make the domain a TPM interface backend. If frontend is given,
19.10 -          the frontend in that domain is connected to this backend (not
19.11 -          completely implemented, yet)""")
19.12 +gopts.var('tpmif', val='no|yes',
19.13 +          fn=append_value, default=0,
19.14 +          use="Make the domain a TPM interface backend.")
19.15
19.16 gopts.var('disk', val='phy:DEV,VDEV,MODE[,DOM]',
19.17           fn=append_value, default=[],
19.18 @@ -273,9 +271,13 @@ gopts.var('vif', val="type=TYPE,mac=MAC,
19.19
19.20 gopts.var('vtpm', val="instance=INSTANCE,backend=DOM",
19.21           fn=append_value, default=[],
19.22 -         use="""Add a tpm interface. On the backend side us the the given
19.23 -         instance as virtual TPM instance. Use the backend in the given
19.24 -         domain.""")
19.25 +         use="""Add a TPM interface. On the backend side use the given
19.26 +         instance as virtual TPM instance. The given number is merely the
19.27 +         preferred instance number. The hotplug script will determine
19.28 +         which instance number will actually be assigned to the domain.
19.29 +         The association between the virtual machine and the TPM instance
19.30 +         number can be found in /etc/xen/vtpm.db. Use the backend in the
19.31 +         given domain.""")
19.32
19.33 gopts.var('nics', val="NUM",
19.34           fn=set_int, default=1,
19.35 @@ -465,34 +467,20 @@ def configure_vtpm(config_devs, vals):
19.36             if instance == "VTPMD":
19.37                 instance = "0"
19.38             else:
19.39 -               try:
19.40 -                   if int(instance) == 0:
19.41 -                       err('VM config error: vTPM instance must not be 0.')
19.42 -               except ValueError:
19.43 -                   err('Vm config error: could not parse instance number.')
19.44 +               if instance != None:
19.45 +                   try:
19.46 +                       if int(instance) == 0:
19.47 +                           err('VM config error: vTPM instance must not be 0.')
19.48 +                   except ValueError:
19.49 +                       err('Vm config error: could not parse instance number.')
19.50             backend = d.get('backend')
19.51             config_vtpm = ['vtpm']
19.52             if instance:
19.53 -               config_vtpm.append(['instance', instance])
19.54 +               config_vtpm.append(['pref_instance', instance])
19.55             if backend:
19.56                 config_vtpm.append(['backend', backend])
19.57             config_devs.append(['device', config_vtpm])
19.58
19.59 -def configure_tpmif(config_devs, vals):
19.60 -    """Create the config for virtual TPM interfaces.
19.61 -    """
19.62 -    tpmif = vals.tpmif
19.63 -    tpmif_n = 1
19.64 -    for idx in range(0, tpmif_n):
19.65 -        if idx < len(tpmif):
19.66 -            d = tpmif[idx]
19.67 -            frontend = d.get('frontend')
19.68 -            config_tpmif = ['tpmif']
19.69 -            if frontend:
19.70 -                config_tpmif.append(['frontend', frontend])
19.71 -            config_devs.append(['device', config_tpmif])
19.72 -
19.73 -
19.74 def configure_vifs(config_devs, vals):
19.75     """Create the config for virtual network interfaces.
19.76     """
19.77 @@ -685,22 +673,6 @@ def preprocess_vtpm(vals):
19.78         vtpms.append(d)
19.79     vals.vtpm = vtpms
19.80
19.81 -def preprocess_tpmif(vals):
19.82 -    if not vals.tpmif: return
19.83 -    tpmifs = []
19.84 -    for tpmif in vals.tpmif:
19.85 -        d = {}
19.86 -        a = tpmif.split(',')
19.87 -        for b in a:
19.88 -            (k, v) = b.strip().split('=', 1)
19.89 -            k = k.strip()
19.90 -            v = v.strip()
19.91 -            if k not in ['frontend']:
19.92 -                err('Invalid tpmif specifier: ' + vtpm)
19.93 -            d[k] = v
19.94 -        tpmifs.append(d)
19.95 -    vals.tpmif = tpmifs
19.96 -
19.97 def preprocess_ip(vals):
19.98     if vals.ip or vals.dhcp != 'off':
19.99         dummy_nfs_server = '1.2.3.4'
19.100 @@ -791,7 +763,6 @@ def preprocess(vals):
19.101     preprocess_nfs(vals)
19.102     preprocess_vnc(vals)
19.103     preprocess_vtpm(vals)
19.104 -    preprocess_tpmif(vals)
19.105
19.106 def make_domain(opts, config):
19.107     """Create, build and start a domain.
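Taken together with the hunks above, the user-visible effect is that the
'instance' number in a domain configuration is only a request. A hedged example
of a guest configuration using the option (kernel path, memory size and disk
line are illustrative values, not taken from this changeset):

    # Example xm domain configuration fragment (illustrative values).
    # 'instance=1' is merely the preferred vTPM instance; the hotplug script
    # may assign a different one and records the domain-name/instance
    # association in /etc/xen/vtpm.db.  Instance 0 must not be requested.
    kernel = "/boot/vmlinuz-2.6-xenU"
    memory = 128
    name   = "vtpm-guest"
    disk   = ['phy:hda1,hda1,w']
    vtpm   = ['instance=1,backend=0']

configure_vtpm() then translates the 'instance' value into the 'pref_instance'
entry seen in the tpmif.py and create.py hunks above.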
20.1 --- a/xen/arch/x86/dom0_ops.c Fri Nov 25 11:19:03 2005 +0000
20.2 +++ b/xen/arch/x86/dom0_ops.c Fri Nov 25 11:19:09 2005 +0000
20.3 @@ -248,7 +248,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
20.4
20.5      case DOM0_GETPAGEFRAMEINFO2:
20.6      {
20.7  -#define GPF2_BATCH 128
20.8  +#define GPF2_BATCH (PAGE_SIZE / sizeof(unsigned long))
20.9          int n,j;
20.10         int num = op->u.getpageframeinfo2.num;
20.11         domid_t dom = op->u.getpageframeinfo2.domain;
20.12 @@ -285,12 +285,9 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
20.13             struct pfn_info *page;
20.14             unsigned long mfn = l_arr[j];
20.15
20.16 -           if ( unlikely(mfn >= max_page) )
20.17 -               goto e2_err;
20.18 +           page = &frame_table[mfn];
20.19
20.20 -           page = &frame_table[mfn];
20.21 -
20.22 -           if ( likely(get_page(page, d)) )
20.23 +           if ( likely(pfn_valid(mfn) && get_page(page, d)) )
20.24             {
20.25                 unsigned long type = 0;
20.26
20.27 @@ -316,10 +313,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
20.28                 put_page(page);
20.29             }
20.30             else
20.31 -           {
20.32 -           e2_err:
20.33                 l_arr[j] |= XTAB;
20.34 -           }
20.35
20.36         }
20.37
20.38 @@ -329,7 +323,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
20.39             break;
20.40         }
20.41
20.42 -       n += j;
20.43 +       n += k;
20.44     }
20.45
20.46     free_xenheap_page(l_arr);
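Two things change in this hunk: the query batch now fills exactly one page of
unsigned longs, and an MFN that fails pfn_valid() is reported by OR-ing the
XTAB flag into its array slot instead of jumping to a separate error label. A
small Python model of that per-batch control flow (an illustration only, not
hypervisor code; the batch size, flag bit and validity test are placeholders):

    # Toy model of the batched page-frame query loop (illustrative only).
    XTAB  = 1 << 31          # stand-in for the "invalid frame" flag bit
    BATCH = 4096 // 8        # stand-in for PAGE_SIZE / sizeof(unsigned long)

    def query_frames(mfns, is_valid, frame_type):
        out = []
        for start in range(0, len(mfns), BATCH):
            batch = mfns[start:start + BATCH]
            for i, mfn in enumerate(batch):
                if is_valid(mfn):
                    batch[i] |= frame_type(mfn)  # merge type bits into the entry
                else:
                    batch[i] |= XTAB             # tag the entry, keep processing
            out.extend(batch)
        return out

    # Frames >= 100 are "invalid" in this toy model.
    print(query_frames(list(range(98, 103)), lambda m: m < 100, lambda m: 0))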
21.1 --- a/xen/common/acm_ops.c Fri Nov 25 11:19:03 2005 +0000
21.2 +++ b/xen/common/acm_ops.c Fri Nov 25 11:19:09 2005 +0000
21.3 @@ -49,15 +49,11 @@ enum acm_operation {
21.4
21.5  int acm_authorize_acm_ops(struct domain *d, enum acm_operation pops)
21.6  {
21.7  -    /* all policy management functions are restricted to privileged domains,
21.8  -     * soon we will introduce finer-grained privileges for policy operations
21.9  -     */
21.10 +    /* currently, policy management functions are restricted to privileged domains */
21.11      if (!IS_PRIV(d))
21.12 -    {
21.13 -        printk("%s: ACM management authorization denied ERROR!\n", __func__);
21.14 -        return ACM_ACCESS_DENIED;
21.15 -    }
21.16 -    return ACM_ACCESS_PERMITTED;
21.17 +        return -EPERM;
21.18 +
21.19 +    return 0;
21.20 }
21.21
21.22 long do_acm_op(struct acm_op * u_acm_op)
21.23 @@ -65,10 +61,8 @@ long do_acm_op(struct acm_op * u_acm_op)
21.24     long ret = 0;
21.25     struct acm_op curop, *op = &curop;
21.26
21.27 -    /* check here policy decision for policy commands */
21.28 -    /* for now allow DOM0 only, later indepedently */
21.29     if (acm_authorize_acm_ops(current->domain, POLICY))
21.30 -        return -EACCES;
21.31 +        return -EPERM;
21.32
21.33     if (copy_from_user(op, u_acm_op, sizeof(*op)))
21.34         return -EFAULT;
21.35 @@ -80,43 +74,32 @@ long do_acm_op(struct acm_op * u_acm_op)
21.36     {
21.37     case ACM_SETPOLICY:
21.38     {
21.39 -        if (acm_authorize_acm_ops(current->domain, SETPOLICY))
21.40 -            return -EACCES;
21.41 -        printkd("%s: setting policy.\n", __func__);
21.42 -        ret = acm_set_policy(op->u.setpolicy.pushcache,
21.43 -                             op->u.setpolicy.pushcache_size, 1);
21.44 -        if (ret == ACM_OK)
21.45 -            ret = 0;
21.46 -        else
21.47 -            ret = -ESRCH;
21.48 +        ret = acm_authorize_acm_ops(current->domain, SETPOLICY);
21.49 +        if (!ret)
21.50 +            ret = acm_set_policy(op->u.setpolicy.pushcache,
21.51 +                                 op->u.setpolicy.pushcache_size, 1);
21.52     }
21.53     break;
21.54
21.55     case ACM_GETPOLICY:
21.56     {
21.57 -        if (acm_authorize_acm_ops(current->domain, GETPOLICY))
21.58 -            return -EACCES;
21.59 -        printkd("%s: getting policy.\n", __func__);
21.60 -        ret = acm_get_policy(op->u.getpolicy.pullcache,
21.61 -                             op->u.getpolicy.pullcache_size);
21.62 -        if (ret == ACM_OK)
21.63 -            ret = 0;
21.64 -        else
21.65 -            ret = -ESRCH;
21.66 +        ret = acm_authorize_acm_ops(current->domain, GETPOLICY);
21.67 +        if (!ret)
21.68 +            ret = acm_get_policy(op->u.getpolicy.pullcache,
21.69 +                                 op->u.getpolicy.pullcache_size);
21.70 +        if (!ret)
21.71 +            copy_to_user(u_acm_op, op, sizeof(*op));
21.72     }
21.73     break;
21.74
21.75     case ACM_DUMPSTATS:
21.76     {
21.77 -        if (acm_authorize_acm_ops(current->domain, DUMPSTATS))
21.78 -            return -EACCES;
21.79 -        printkd("%s: dumping statistics.\n", __func__);
21.80 -        ret = acm_dump_statistics(op->u.dumpstats.pullcache,
21.81 -                                  op->u.dumpstats.pullcache_size);
21.82 -        if (ret == ACM_OK)
21.83 -            ret = 0;
21.84 -        else
21.85 -            ret = -ESRCH;
21.86 +        ret = acm_authorize_acm_ops(current->domain, DUMPSTATS);
21.87 +        if (!ret)
21.88 +            ret = acm_dump_statistics(op->u.dumpstats.pullcache,
21.89 +                                      op->u.dumpstats.pullcache_size);
21.90 +        if (!ret)
21.91 +            copy_to_user(u_acm_op, op, sizeof(*op));
21.92     }
21.93     break;
21.94
21.95 @@ -124,31 +107,39 @@ long do_acm_op(struct acm_op * u_acm_op)
21.96     {
21.97         ssidref_t ssidref;
21.98
21.99  -        if (acm_authorize_acm_ops(current->domain, GETSSID))
21.100 -            return -EACCES;
21.101 -        printkd("%s: getting SSID.\n", __func__);
21.102 +        ret = acm_authorize_acm_ops(current->domain, GETSSID);
21.103 +        if (ret)
21.104 +            break;
21.105 +
21.106         if (op->u.getssid.get_ssid_by == SSIDREF)
21.107             ssidref = op->u.getssid.id.ssidref;
21.108 -        else if (op->u.getssid.get_ssid_by == DOMAINID) {
21.109 +        else if (op->u.getssid.get_ssid_by == DOMAINID)
21.110 +        {
21.111             struct domain *subj = find_domain_by_id(op->u.getssid.id.domainid);
21.112             if (!subj)
21.113 -                return -ESRCH; /* domain not found */
21.114 -            if (subj->ssid == NULL) {
21.115 +            {
21.116 +                ret = -ESRCH; /* domain not found */
21.117 +                break;
21.118 +            }
21.119 +            if (subj->ssid == NULL)
21.120 +            {
21.121                 put_domain(subj);
21.122 -                return -ESRCH;
21.123 +                ret = -ESRCH;
21.124 +                break;
21.125             }
21.126             ssidref = ((struct acm_ssid_domain *)(subj->ssid))->ssidref;
21.127             put_domain(subj);
21.128 -        } else
21.129 -            return -ESRCH;
21.130 -
21.131 +        }
21.132 +        else
21.133 +        {
21.134 +            ret = -ESRCH;
21.135 +            break;
21.136 +        }
21.137         ret = acm_get_ssid(ssidref,
21.138                            op->u.getssid.ssidbuf,
21.139                            op->u.getssid.ssidbuf_size);
21.140 -        if (ret == ACM_OK)
21.141 -            ret = 0;
21.142 -        else
21.143 -            ret = -ESRCH;
21.144 +        if (!ret)
21.145 +            copy_to_user(u_acm_op, op, sizeof(*op));
21.146     }
21.147     break;
21.148
21.149 @@ -156,51 +147,75 @@ long do_acm_op(struct acm_op * u_acm_op)
21.150     {
21.151         ssidref_t ssidref1, ssidref2;
21.152
21.153 -        if (acm_authorize_acm_ops(current->domain, GETDECISION)) {
21.154 -            ret = -EACCES;
21.155 -            goto out;
21.156 -        }
21.157 -        printkd("%s: getting access control decision.\n", __func__);
21.158 -        if (op->u.getdecision.get_decision_by1 == SSIDREF) {
21.159 +        ret = acm_authorize_acm_ops(current->domain, GETDECISION);
21.160 +        if (ret)
21.161 +            break;
21.162 +
21.163 +        if (op->u.getdecision.get_decision_by1 == SSIDREF)
21.164             ssidref1 = op->u.getdecision.id1.ssidref;
21.165 -        }
21.166 -        else if (op->u.getdecision.get_decision_by1 == DOMAINID) {
21.167 +        else if (op->u.getdecision.get_decision_by1 == DOMAINID)
21.168 +        {
21.169             struct domain *subj = find_domain_by_id(op->u.getdecision.id1.domainid);
21.170 -            if (!subj) {
21.171 +            if (!subj)
21.172 +            {
21.173                 ret = -ESRCH; /* domain not found */
21.174 -                goto out;
21.175 +                break;
21.176             }
21.177 -            if (subj->ssid == NULL) {
21.178 +            if (subj->ssid == NULL)
21.179 +            {
21.180                 put_domain(subj);
21.181                 ret = -ESRCH;
21.182 -                goto out;
21.183 +                break;
21.184             }
21.185             ssidref1 = ((struct acm_ssid_domain *)(subj->ssid))->ssidref;
21.186             put_domain(subj);
21.187 -        } else {
21.188 +        }
21.189 +        else
21.190 +        {
21.191             ret = -ESRCH;
21.192 -            goto out;
21.193 -        }
21.194 -        if (op->u.getdecision.get_decision_by2 == SSIDREF) {
21.195 -            ssidref2 = op->u.getdecision.id2.ssidref;
21.196 +            break;
21.197         }
21.198 -        else if (op->u.getdecision.get_decision_by2 == DOMAINID) {
21.199 +        if (op->u.getdecision.get_decision_by2 == SSIDREF)
21.200 +            ssidref2 = op->u.getdecision.id2.ssidref;
21.201 +        else if (op->u.getdecision.get_decision_by2 == DOMAINID)
21.202 +        {
21.203             struct domain *subj = find_domain_by_id(op->u.getdecision.id2.domainid);
21.204 -            if (!subj) {
21.205 +            if (!subj)
21.206 +            {
21.207                 ret = -ESRCH; /* domain not found */
21.208 -                goto out;
21.209 +                break;
21.210             }
21.211 -            if (subj->ssid == NULL) {
21.212 +            if (subj->ssid == NULL)
21.213 +            {
21.214                 put_domain(subj);
21.215 -                return -ESRCH;
21.216 +                ret = -ESRCH;
21.217 +                break;
21.218             }
21.219             ssidref2 = ((struct acm_ssid_domain *)(subj->ssid))->ssidref;
21.220             put_domain(subj);
21.221 -        } else {
21.222 +        }
21.223 +        else
21.224 +        {
21.225             ret = -ESRCH;
21.226 -            goto out;
21.227 +            break;
21.228         }
21.229         ret = acm_get_decision(ssidref1, ssidref2, op->u.getdecision.hook);
21.230 +
21.231 +        if (ret == ACM_ACCESS_PERMITTED)
21.232 +        {
21.233 +            op->u.getdecision.acm_decision = ACM_ACCESS_PERMITTED;
21.234 +            ret = 0;
21.235 +        }
21.236 +        else if (ret == ACM_ACCESS_DENIED)
21.237 +        {
21.238 +            op->u.getdecision.acm_decision = ACM_ACCESS_DENIED;
21.239 +            ret = 0;
21.240 +        }
21.241 +        else
21.242 +            ret = -ESRCH;
21.243 +
21.244 +        if (!ret)
21.245 +            copy_to_user(u_acm_op, op, sizeof(*op));
21.246     }
21.247     break;
21.248
21.249 @@ -208,20 +223,6 @@ long do_acm_op(struct acm_op * u_acm_op)
21.250         ret = -ESRCH;
21.251     }
21.252
21.253 -    out:
21.254 -    if (ret == ACM_ACCESS_PERMITTED) {
21.255 -        op->u.getdecision.acm_decision = ACM_ACCESS_PERMITTED;
21.256 -        ret = 0;
21.257 -    } else if (ret == ACM_ACCESS_DENIED) {
21.258 -        op->u.getdecision.acm_decision = ACM_ACCESS_DENIED;
21.259 -        ret = 0;
21.260 -    } else {
21.261 -        op->u.getdecision.acm_decision = ACM_ACCESS_DENIED;
21.262 -        if (ret > 0)
21.263 -            ret = -ret;
21.264 -    }
21.265 -    /* copy decision back to user space */
21.266 -    copy_to_user(u_acm_op, op, sizeof(*op));
21.267     return ret;
21.268 }
21.269
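The net effect of the acm_ops.c rewrite is a conventional error model:
acm_authorize_acm_ops() now returns 0 or -EPERM, each operation copies its own
result back to the caller, and the shared 'out:' label disappears because every
error path sets ret and breaks out of the switch. A rough Python model of the
new dispatch shape (control flow only, with made-up handlers, not the
hypervisor code):

    # Illustrative model of the reworked do_acm_op() error handling.
    import errno

    def authorize(privileged):
        # Mirrors the new helper: 0 on success, -EPERM otherwise.
        return 0 if privileged else -errno.EPERM

    def do_acm_op(cmd, handlers, privileged=True):
        ret = authorize(privileged)      # coarse check before dispatch
        if ret:
            return ret
        handler = handlers.get(cmd)
        if handler is None:
            return -errno.ESRCH          # unknown operation
        ret = authorize(privileged)      # per-operation check
        if not ret:
            ret = handler()              # 0 on success, negative errno on failure
        return ret                       # caller sees one consistent convention

    print(do_acm_op('getpolicy', {'getpolicy': lambda: 0}))         # 0
    print(do_acm_op('getpolicy', {'getpolicy': lambda: 0}, False))  # -1 (-EPERM)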