direct-io.hg
changeset 8524:dfb836264898
Merged.
author    emellor@leeni.uk.xensource.com
date      Mon Jan 09 14:43:46 2006 +0000 (2006-01-09)
parents   970cf1fff5f2 fbf58585008a
children  1572681e4e5a
files     linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c
          xen/arch/x86/domain.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c	Mon Jan 09 11:24:02 2006 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c	Mon Jan 09 14:43:46 2006 +0000
@@ -109,7 +109,7 @@ static ssize_t xenbus_dev_write(struct f
                         size_t len, loff_t *ppos)
 {
         struct xenbus_dev_data *u = filp->private_data;
-        struct xenbus_dev_transaction *trans;
+        struct xenbus_dev_transaction *trans = NULL;
         void *reply;
 
         if ((len + u->len) > sizeof(u->u.buffer))
@@ -134,14 +134,19 @@ static ssize_t xenbus_dev_write(struct f
         case XS_MKDIR:
         case XS_RM:
         case XS_SET_PERMS:
-                reply = xenbus_dev_request_and_reply(&u->u.msg);
-                if (IS_ERR(reply))
-                        return PTR_ERR(reply);
-
                 if (u->u.msg.type == XS_TRANSACTION_START) {
                         trans = kmalloc(sizeof(*trans), GFP_KERNEL);
                         if (!trans)
                                 return -ENOMEM;
+                }
+
+                reply = xenbus_dev_request_and_reply(&u->u.msg);
+                if (IS_ERR(reply)) {
+                        kfree(trans);
+                        return PTR_ERR(reply);
+                }
+
+                if (u->u.msg.type == XS_TRANSACTION_START) {
                         trans->handle = (struct xenbus_transaction *)
                                 simple_strtoul(reply, NULL, 0);
                         list_add(&trans->list, &u->transactions);
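This hunk fixes the error handling around XS_TRANSACTION_START: the tracking
structure is now allocated before the request is handed to xenstored, so an
allocation failure no longer returns -ENOMEM after a transaction has already
been started but left untracked, and the structure is freed if the request
itself fails. Below is a minimal userspace sketch of that allocate-before-commit
pattern; start_transaction(), struct record, and do_start() are hypothetical
stand-ins for the Xen code, not part of the actual sources.

/*
 * Sketch of the allocate-before-commit pattern the patch adopts.
 * start_transaction() stands in for xenbus_dev_request_and_reply();
 * struct record stands in for struct xenbus_dev_transaction.
 */
#include <stdio.h>
#include <stdlib.h>

struct record { unsigned long handle; };

/* Hypothetical request: returns a handle >= 0, or -1 on failure. */
static long start_transaction(void)
{
        return 42;
}

static int do_start(struct record **out)
{
        /*
         * Allocate the bookkeeping first, while nothing irreversible
         * has happened yet; bailing out here has no side effects.
         */
        struct record *rec = malloc(sizeof(*rec));
        if (!rec)
                return -1;

        long handle = start_transaction();
        if (handle < 0) {
                free(rec);              /* undo the allocation on failure */
                return -1;
        }

        rec->handle = (unsigned long)handle;
        *out = rec;
        return 0;
}

int main(void)
{
        struct record *rec = NULL;

        if (do_start(&rec) == 0) {
                printf("transaction handle: %lu\n", rec->handle);
                free(rec);
        }
        return 0;
}

Allocating the bookkeeping first keeps both failure paths trivial: before the
request, failure has nothing to undo; after it, the only cleanup is freeing
the record.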
--- a/xen/arch/x86/domain.c	Mon Jan 09 11:24:02 2006 +0000
+++ b/xen/arch/x86/domain.c	Mon Jan 09 14:43:46 2006 +0000
@@ -689,6 +689,9 @@ static void __context_switch(void)
     struct vcpu *p = percpu_ctxt[cpu].curr_vcpu;
     struct vcpu *n = current;
 
+    ASSERT(p != n);
+    ASSERT(cpus_empty(n->vcpu_dirty_cpumask));
+
     if ( !is_idle_domain(p->domain) )
     {
         memcpy(&p->arch.guest_context.user_regs,
@@ -748,24 +751,31 @@ static void __context_switch(void)
 void context_switch(struct vcpu *prev, struct vcpu *next)
 {
     unsigned int cpu = smp_processor_id();
+    cpumask_t dirty_mask = next->vcpu_dirty_cpumask;
 
     ASSERT(local_irq_is_enabled());
 
+    /* Allow at most one CPU at a time to be dirty. */
+    ASSERT(cpus_weight(dirty_mask) <= 1);
+    if ( unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)) )
+    {
+        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
+        flush_tlb_mask(dirty_mask);
+    }
+
+    local_irq_disable();
+
     set_current(next);
 
-    if ( (percpu_ctxt[cpu].curr_vcpu != next) &&
-         !is_idle_domain(next->domain) )
+    if ( (percpu_ctxt[cpu].curr_vcpu == next) || is_idle_domain(next->domain) )
     {
-        /* This may happen if next has been migrated by the scheduler. */
-        if ( unlikely(!cpus_empty(next->vcpu_dirty_cpumask)) )
-        {
-            ASSERT(!cpu_isset(cpu, next->vcpu_dirty_cpumask));
-            sync_vcpu_execstate(next);
-            ASSERT(cpus_empty(next->vcpu_dirty_cpumask));
-        }
+        local_irq_enable();
+    }
+    else
+    {
+        __context_switch();
 
-        local_irq_disable();
-        __context_switch();
+        /* Re-enable interrupts before restoring state which may fault. */
         local_irq_enable();
 
         if ( VMX_DOMAIN(next) )
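This hunk reworks lazy state handling in context_switch(): instead of the
incoming CPU pulling the state with sync_vcpu_execstate(), a CPU that still
holds next's dirty state is sent a flush IPI via flush_tlb_mask() and saves
the state itself in __sync_lazy_execstate(); interrupts are then disabled
before set_current(). The new ASSERTs encode the invariant that at most one
CPU may hold a vCPU's dirty state. The toy below models only that dirty-mask
bookkeeping, using a plain bitmask in place of cpumask_t; every name in it is
illustrative, not the Xen cpumask API.

/*
 * Single-threaded sketch of the dirty-cpumask invariant the patch
 * relies on.  flush_remote() stands in for the flush IPI whose handler
 * syncs the lazy state on the remote CPU.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long cpumask;

static int  cpus_weight(cpumask m)          { return __builtin_popcountl(m); }
static bool cpu_isset(int cpu, cpumask m)   { return (m >> cpu) & 1UL; }
static bool cpus_empty(cpumask m)           { return m == 0; }

/*
 * Stand-in for sending the flush IPI: the remote CPU saves the vCPU's
 * lazily-held state in its handler, clearing the dirty bit.
 */
static void flush_remote(cpumask *dirty)
{
        printf("IPI to mask 0x%lx: remote CPU syncs lazy state\n", *dirty);
        *dirty = 0;
}

static void context_switch_to(int this_cpu, cpumask *vcpu_dirty)
{
        /* Invariant from the patch: at most one CPU holds dirty state. */
        assert(cpus_weight(*vcpu_dirty) <= 1);

        /* If another CPU holds the state, push it out before loading. */
        if (!cpu_isset(this_cpu, *vcpu_dirty) && !cpus_empty(*vcpu_dirty))
                flush_remote(vcpu_dirty);

        /* Now safe to load the vCPU's register state on this CPU. */
        *vcpu_dirty = 1UL << this_cpu;
        printf("CPU %d now holds the vCPU state\n", this_cpu);
}

int main(void)
{
        cpumask dirty = 1UL << 3;           /* vCPU last ran lazily on CPU 3 */

        context_switch_to(0, &dirty);       /* migrated: CPU 3 flushes first */
        context_switch_to(0, &dirty);       /* same CPU: no IPI needed */
        return 0;
}

Pushing the sync to the dirty CPU via IPI means the owner of the state is
always the one to save it, which is what makes the single-dirty-CPU invariant
checkable with a simple assertion at the top of the switch.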