direct-io.hg
changeset 1263:31bcc6528b15
bitkeeper revision 1.825.8.1 (406b3b3aiVrvWmI8XjJ_64Sk92hivw)
Many files:
Faster upcall masking from Xen to guest OS.
| field | value |
|---|---|
| author | kaf24@scramble.cl.cam.ac.uk |
| date | Wed Mar 31 21:42:18 2004 +0000 (2004-03-31) |
| parents | 109663e1ac8d |
| children | 09b8b95eb976 |
| files | tools/xc/lib/xc_linux_build.c tools/xc/lib/xc_netbsd_build.c xen/arch/i386/entry.S xen/arch/i386/traps.c xen/common/domain.c xen/common/keyhandler.c xen/common/schedule.c xen/include/hypervisor-ifs/hypervisor-if.h xen/include/xen/event.h xen/include/xen/sched.h xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S xenolinux-2.4.25-sparse/arch/xen/kernel/evtchn.c xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h xenolinux-2.4.25-sparse/include/asm-xen/system.h |
line diff
```diff
--- a/tools/xc/lib/xc_linux_build.c	Wed Mar 31 16:15:50 2004 +0000
+++ b/tools/xc/lib/xc_linux_build.c	Wed Mar 31 21:42:18 2004 +0000
@@ -284,7 +284,9 @@ static int setup_guestos(int xc_handle,
     /* shared_info page starts its life empty. */
     shared_info = map_pfn_writeable(pm_handle, shared_info_frame);
     memset(shared_info, 0, PAGE_SIZE);
-    shared_info->evtchn_upcall_mask = ~0UL; /* mask all upcalls */
+    /* Mask all upcalls... */
+    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+        shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
     unmap_pfn(pm_handle, shared_info);
 
     /* Send the page update requests down to the hypervisor. */
```
```diff
--- a/tools/xc/lib/xc_netbsd_build.c	Wed Mar 31 16:15:50 2004 +0000
+++ b/tools/xc/lib/xc_netbsd_build.c	Wed Mar 31 21:42:18 2004 +0000
@@ -75,7 +75,7 @@ static int setup_guestos(int xc_handle,
     shared_info_t *shared_info;
     unsigned long ksize;
     mmu_t *mmu = NULL;
-    int pm_handle;
+    int pm_handle, i;
 
     memset(builddomain, 0, sizeof(*builddomain));
 
@@ -183,7 +183,9 @@ static int setup_guestos(int xc_handle,
     /* shared_info page starts its life empty. */
     shared_info = map_pfn_writeable(pm_handle, shared_info_frame);
     memset(shared_info, 0, PAGE_SIZE);
-    shared_info->evtchn_upcall_mask = ~0UL; /* mask all upcalls */
+    /* Mask all upcalls... */
+    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+        shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
     unmap_pfn(pm_handle, shared_info);
 
     /* Send the page update requests down to the hypervisor. */
```
```diff
--- a/xen/arch/i386/entry.S	Wed Mar 31 16:15:50 2004 +0000
+++ b/xen/arch/i386/entry.S	Wed Mar 31 21:42:18 2004 +0000
@@ -112,8 +112,8 @@ FAILSAFE_SEL = 32
 FAILSAFE_ADDR = 36
 
 /* Offsets in shared_info_t */
-UPCALL_PENDING = 0
-UPCALL_MASK    = 4
+#define UPCALL_PENDING /* 0 */
+#define UPCALL_MASK       1
 
 /* Offsets in guest_trap_bounce */
 GTB_ERROR_CODE = 0
@@ -368,12 +368,11 @@ test_all_events:
         jnz  process_hyp_events
 /*test_guest_events:*/
         movl SHARED_INFO(%ebx),%eax
-        movl UPCALL_MASK(%eax),%ecx
-        notl %ecx
-        andl UPCALL_PENDING(%eax),%ecx  # ECX = pending & ~mask
-        andl $1,%ecx                    # Is bit 0 pending and not masked?
+        testb $0xFF,UPCALL_MASK(%eax)
+        jnz  restore_all_guest
+        testb $0xFF,UPCALL_PENDING(%eax)
         jz   restore_all_guest
-        lock btsl $0,UPCALL_MASK(%eax)  # Upcalls are masked during delivery
+        movb $1,UPCALL_MASK(%eax)       # Upcalls are masked during delivery
 /*process_guest_events:*/
         movzwl PROCESSOR(%ebx),%edx
         shl  $4,%edx                    # sizeof(guest_trap_bounce) == 16
```
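The rewritten test is easier to follow in C. Below is a minimal sketch (not part of the changeset) of what the new fast path does; `deliver_upcall()` is a hypothetical stand-in for the guest_trap_bounce setup that follows in entry.S. Because only the CPU currently hosting the VCPU ever touches these bytes, plain byte moves can replace the old LOCKed bit operations.

```c
/* Hypothetical C rendering of the new test_guest_events logic. */
typedef unsigned char u8;

struct vcpu_info {
    u8 evtchn_upcall_pending;   /* byte offset 0 == UPCALL_PENDING */
    u8 evtchn_upcall_mask;      /* byte offset 1 == UPCALL_MASK    */
    u8 pad0, pad1;
};

void deliver_upcall(struct vcpu_info *v);    /* hypothetical helper */

void test_guest_events(struct vcpu_info *v)
{
    if ( v->evtchn_upcall_mask )        /* testb $0xFF,UPCALL_MASK(%eax)    */
        return;                         /* jnz  restore_all_guest           */
    if ( !v->evtchn_upcall_pending )    /* testb $0xFF,UPCALL_PENDING(%eax) */
        return;                         /* jz   restore_all_guest           */
    v->evtchn_upcall_mask = 1;          /* upcalls are masked during delivery */
    deliver_upcall(v);
}
```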
```diff
--- a/xen/arch/i386/traps.c	Wed Mar 31 16:15:50 2004 +0000
+++ b/xen/arch/i386/traps.c	Wed Mar 31 21:42:18 2004 +0000
@@ -206,7 +206,7 @@ static inline void do_trap(int trapnr, c
     gtb->cs  = ti->cs;
     gtb->eip = ti->address;
     if ( TI_GET_IF(ti) )
-        set_bit(0, &p->shared_info->evtchn_upcall_mask);
+        p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
     return;
 
  fault_in_hypervisor:
@@ -277,9 +277,7 @@ asmlinkage void do_int3(struct pt_regs *
     gtb->cs  = ti->cs;
     gtb->eip = ti->address;
     if ( TI_GET_IF(ti) )
-        set_bit(0, &p->shared_info->evtchn_upcall_mask);
-    return;
-
+        p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
 }
 
 asmlinkage void do_double_fault(void)
@@ -353,7 +351,7 @@ asmlinkage void do_page_fault(struct pt_
     gtb->cs  = ti->cs;
     gtb->eip = ti->address;
     if ( TI_GET_IF(ti) )
-        set_bit(0, &p->shared_info->evtchn_upcall_mask);
+        p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
     return;
 
  fault_in_hypervisor:
@@ -452,7 +450,7 @@ asmlinkage void do_general_protection(st
     gtb->cs  = ti->cs;
     gtb->eip = ti->address;
     if ( TI_GET_IF(ti) )
-        set_bit(0, &p->shared_info->evtchn_upcall_mask);
+        p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
     return;
 
  gp_in_kernel:
```
```diff
--- a/xen/common/domain.c	Wed Mar 31 16:15:50 2004 +0000
+++ b/xen/common/domain.c	Wed Mar 31 21:42:18 2004 +0000
@@ -918,7 +918,9 @@ int construct_dom0(struct task_struct *p
     /* Set up shared-info area. */
     update_dom_time(p->shared_info);
     p->shared_info->domain_time = 0;
-    p->shared_info->evtchn_upcall_mask = ~0UL; /* mask all upcalls */
+    /* Mask all upcalls... */
+    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+        p->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
 
     /* Install the new page tables. */
     __cli();
```
```diff
--- a/xen/common/keyhandler.c	Wed Mar 31 16:15:50 2004 +0000
+++ b/xen/common/keyhandler.c	Wed Mar 31 21:42:18 2004 +0000
@@ -94,8 +94,9 @@ void do_task_queues(u_char key, void *de
         sched_prn_state(p ->state);
         printk(", hyp_events = %08x\n", p->hyp_events);
         s = p->shared_info;
-        printk("Guest: upcall_pend = %08lx, upcall_mask = %08lx\n",
-               s->evtchn_upcall_pending, s->evtchn_upcall_mask);
+        printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n",
+               s->vcpu_data[0].evtchn_upcall_pending,
+               s->vcpu_data[0].evtchn_upcall_mask);
         printk("Notifying guest...\n");
         send_guest_virq(p, VIRQ_DEBUG);
     }
```
```diff
--- a/xen/common/schedule.c	Wed Mar 31 16:15:50 2004 +0000
+++ b/xen/common/schedule.c	Wed Mar 31 21:42:18 2004 +0000
@@ -220,7 +220,7 @@ void wake_up(struct task_struct *p)
 static long do_block(void)
 {
     ASSERT(current->domain != IDLE_DOMAIN_ID);
-    clear_bit(0, &current->shared_info->evtchn_upcall_mask);
+    current->shared_info->vcpu_data[0].evtchn_upcall_mask = 0;
     current->state = TASK_INTERRUPTIBLE;
     TRACE_2D(TRC_SCHED_BLOCK, current->domain, current);
     __enter_scheduler();
```
```diff
--- a/xen/include/hypervisor-ifs/hypervisor-if.h	Wed Mar 31 16:15:50 2004 +0000
+++ b/xen/include/hypervisor-ifs/hypervisor-if.h	Wed Mar 31 21:42:18 2004 +0000
@@ -150,6 +150,9 @@ typedef struct
 /* Event channel endpoints per domain. */
 #define NR_EVENT_CHANNELS 1024
 
+/* No support for multi-processor guests. */
+#define MAX_VIRT_CPUS 1
+
 /*
  * Xen/guestos shared data -- pointer provided in start_info.
  * NB. We expect that this struct is smaller than a page.
@@ -157,13 +160,39 @@ typedef struct shared_info_st
 typedef struct shared_info_st
 {
     /*
-     * If bit 0 in evtchn_upcall_pending is transitioned 0->1, and bit 0 in
-     * evtchn_upcall_mask is clear, then an asynchronous upcall is scheduled.
-     * The upcall mask can be used to prevent unbounded reentrancy and stack
-     * overflow (in this way, acts as a kind of interrupt-enable flag).
+     * Per-VCPU information goes here. This will be cleaned up more when Xen
+     * actually supports multi-VCPU guests.
      */
-    unsigned long evtchn_upcall_pending;
-    unsigned long evtchn_upcall_mask;
+    struct {
+        /*
+         * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
+         * a pending notification for a particular VCPU. It is then cleared
+         * by the guest OS /before/ checking for pending work, thus avoiding
+         * a set-and-check race. Note that the mask is only accessed by Xen
+         * on the CPU that is currently hosting the VCPU. This means that the
+         * pending and mask flags can be updated by the guest without special
+         * synchronisation (i.e., no need for the x86 LOCK prefix).
+         * This may seem suboptimal because if the pending flag is set by
+         * a different CPU then an IPI may be scheduled even when the mask
+         * is set. However, note:
+         *  1. The task of 'interrupt holdoff' is covered by the per-event-
+         *     channel mask bits. A 'noisy' event that is continually being
+         *     triggered can be masked at source at this very precise
+         *     granularity.
+         *  2. The main purpose of the per-VCPU mask is therefore to restrict
+         *     reentrant execution: whether for concurrency control, or to
+         *     prevent unbounded stack usage. Whatever the purpose, we expect
+         *     that the mask will be asserted only for short periods at a time,
+         *     and so the likelihood of a 'spurious' IPI is suitably small.
+         * The mask is read before making an event upcall to the guest: a
+         * non-zero mask therefore guarantees that the VCPU will not receive
+         * an upcall activation. The mask is cleared when the VCPU requests
+         * to block: this avoids wakeup-waiting races.
+         */
+        u8 evtchn_upcall_pending;
+        u8 evtchn_upcall_mask;
+        u8 pad0, pad1;
+    } vcpu_data[MAX_VIRT_CPUS];
 
     /*
      * A domain can have up to 1024 "event channels" on which it can send
```
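The two new u8 flags for VCPU 0 land at bytes 0 and 1 of the shared-info page, which is exactly what the UPCALL_PENDING/UPCALL_MASK offsets in both entry.S files assume. A small compile-time check (a sketch, not in the changeset) makes that coupling explicit:

```c
#include <stddef.h>

/* Sketch: pin down the layout that the assembly offsets depend on. */
typedef unsigned char u8;
#define MAX_VIRT_CPUS 1

typedef struct shared_info_st {
    struct {
        u8 evtchn_upcall_pending;
        u8 evtchn_upcall_mask;
        u8 pad0, pad1;
    } vcpu_data[MAX_VIRT_CPUS];
    /* ... event-channel words follow ... */
} shared_info_t;

/* Fails to compile if the layout drifts from UPCALL_PENDING=0, UPCALL_MASK=1. */
typedef char pending_off_check[offsetof(shared_info_t, vcpu_data[0].evtchn_upcall_pending) == 0 ? 1 : -1];
typedef char mask_off_check   [offsetof(shared_info_t, vcpu_data[0].evtchn_upcall_mask)    == 1 ? 1 : -1];
```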
```diff
--- a/xen/include/xen/event.h	Wed Mar 31 16:15:50 2004 +0000
+++ b/xen/include/xen/event.h	Wed Mar 31 21:42:18 2004 +0000
@@ -18,7 +18,7 @@
  */
 
 /* Schedule an asynchronous callback for the specified domain. */
-static inline void __guest_notify(struct task_struct *p)
+static inline void guest_schedule_to_run(struct task_struct *p)
 {
 #ifdef CONFIG_SMP
     unsigned long flags, cpu_mask;
@@ -41,23 +41,11 @@ static inline void __guest_notify(struct
 #endif
 }
 
-static inline void guest_notify(struct task_struct *p)
-{
-    /*
-     * Upcall already pending or upcalls masked?
-     * NB. Suitably synchronised on x86:
-     *  We must set the pending bit before checking the mask, but this is
-     *  guaranteed to occur because test_and_set_bit() is an ordering barrier.
-     */
-    if ( !test_and_set_bit(0, &p->shared_info->evtchn_upcall_pending) &&
-         !test_bit(0, &p->shared_info->evtchn_upcall_mask) )
-        __guest_notify(p);
-}
-
-
 /*
  * EVENT-CHANNEL NOTIFICATIONS
- * NB. As in guest_notify, evtchn_set_* is suitably synchronised on x86.
+ * NB. On x86, the atomic bit operations also act as memory barriers. There
+ * is therefore sufficiently strict ordering for this architecture -- others
+ * may require explicit memory barriers.
  */
 
 static inline void evtchn_set_pending(struct task_struct *p, int port)
@@ -66,7 +54,11 @@ static inline void evtchn_set_pending(st
     if ( !test_and_set_bit(port, &s->evtchn_pending[0]) &&
          !test_bit (port, &s->evtchn_mask[0]) &&
          !test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
-        guest_notify(p);
+    {
+        /* The VCPU pending flag must be set /after/ update to evtchn-pend. */
+        p->shared_info->vcpu_data[0].evtchn_upcall_pending = 1;
+        guest_schedule_to_run(p);
+    }
 }
 
 static inline void evtchn_set_exception(struct task_struct *p, int port)
@@ -103,7 +95,7 @@ static inline void send_guest_pirq(struc
 static inline void send_hyp_event(struct task_struct *p, int event)
 {
     if ( !test_and_set_bit(event, &p->hyp_events) )
-        __guest_notify(p);
+        guest_schedule_to_run(p);
 }
 
 /* Called on return from (architecture-dependent) entry.S. */
```
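The comment added inside evtchn_set_pending is the producer half of an ordering contract with the guest's evtchn_do_upcall loop (see the evtchn.c hunk below). On x86 the preceding LOCKed test_and_set_bit already orders the stores; for a hypothetical port to a weaker memory model the rule would look like this sketch, which assumes the surrounding Xen definitions plus a Linux-style wmb():

```c
/* Sketch only: evtchn_set_pending with the ordering made explicit. The
 * per-event bits must be published before the per-VCPU pending flag,
 * otherwise the guest could observe the flag yet find no work to do. */
static inline void evtchn_set_pending_sketch(struct task_struct *p, int port)
{
    shared_info_t *s = p->shared_info;

    if ( !test_and_set_bit(port,    &s->evtchn_pending[0]) &&
         !test_bit        (port,    &s->evtchn_mask[0])    &&
         !test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
    {
        wmb();  /* order evtchn_pending/_sel before the VCPU pending flag */
        s->vcpu_data[0].evtchn_upcall_pending = 1;
        guest_schedule_to_run(p);
    }
}
```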
```diff
--- a/xen/include/xen/sched.h	Wed Mar 31 16:15:50 2004 +0000
+++ b/xen/include/xen/sched.h	Wed Mar 31 21:42:18 2004 +0000
@@ -297,10 +297,10 @@ static inline long schedule_timeout(long
     return 0;
 }
 
-#define signal_pending(_p) \
-    (((_p)->hyp_events != 0) || \
-     (test_bit(0, &(_p)->shared_info->evtchn_upcall_pending) && \
-      !test_bit(0, &(_p)->shared_info->evtchn_upcall_mask)))
+#define signal_pending(_p) \
+    ( (_p)->hyp_events || \
+      ((_p)->shared_info->vcpu_data[0].evtchn_upcall_pending && \
+       !(_p)->shared_info->vcpu_data[0].evtchn_upcall_mask) )
 
 void domain_init(void);
```
```diff
--- a/xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S	Wed Mar 31 16:15:50 2004 +0000
+++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S	Wed Mar 31 21:42:18 2004 +0000
@@ -80,7 +80,7 @@ processor = 52
 
 /* Offsets into shared_info_t. */
 #define evtchn_upcall_pending /* 0 */
-#define evtchn_upcall_mask    4
+#define evtchn_upcall_mask    1
 
 ENOSYS = 38
 
@@ -210,14 +210,14 @@ ENTRY(system_call)
 	movl %eax,EAX(%esp)		# save the return value
 ENTRY(ret_from_sys_call)
 	movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
-	lock btsl $0,evtchn_upcall_mask(%esi)	# make tests atomic
+	movb $1,evtchn_upcall_mask(%esi)	# make tests atomic
 ret_syscall_tests:
 	cmpl $0,need_resched(%ebx)
 	jne reschedule
 	cmpl $0,sigpending(%ebx)
 	je safesti			# ensure need_resched updates are seen
 signal_return:
-	lock btrl $0,evtchn_upcall_mask(%esi)	# reenable event callbacks
+	movb $0,evtchn_upcall_mask(%esi)	# reenable event callbacks
 	movl %esp,%eax
 	xorl %edx,%edx
 	call SYMBOL_NAME(do_signal)
@@ -254,9 +254,9 @@ ret_from_exception:
 
 	ALIGN
 reschedule:
-	lock btrl $0,evtchn_upcall_mask(%esi)	# reenable event callbacks
-	call SYMBOL_NAME(schedule)		# test
-	jmp ret_from_sys_call
+	movb $0,evtchn_upcall_mask(%esi)	# reenable event callbacks
+	call SYMBOL_NAME(schedule)		# test
+	jmp ret_from_sys_call
 
 ENTRY(divide_error)
 	pushl $0			# no error code
@@ -317,12 +317,12 @@ 11: push %esp
 	movb CS(%esp),%cl
 	test $2,%cl			# slow return to ring 2 or 3
 	jne ret_syscall_tests
-safesti:lock btrl $0,evtchn_upcall_mask(%esi)	# reenable event callbacks
+safesti:movb $0,evtchn_upcall_mask(%esi)	# reenable event callbacks
 scrit:	/**** START OF CRITICAL REGION ****/
-	testb $1,evtchn_upcall_pending(%esi)
+	testb $0xFF,evtchn_upcall_pending(%esi)
 	jnz 14f				# process more events if necessary...
 	RESTORE_ALL
-14:	lock btsl $0,evtchn_upcall_mask(%esi)
+14:	movb $1,evtchn_upcall_mask(%esi)
 	jmp 11b
 ecrit:	/**** END OF CRITICAL REGION ****/
 # [How we do the fixup]. We want to merge the current stack frame with the
@@ -351,7 +351,7 @@ 16: movl %edi,%esp # final %e
 	jmp 11b
 
 critical_fixup_table:
-	.byte 0x00,0x00,0x00			# testb $1,(%esi)
+	.byte 0x00,0x00,0x00			# testb $0xFF,(%esi)
 	.byte 0x00,0x00				# jnz 14f
 	.byte 0x00				# pop %ebx
 	.byte 0x04				# pop %ecx
@@ -364,7 +364,7 @@ critical_fixup_table:
 	.byte 0x20				# pop %es
 	.byte 0x24,0x24,0x24			# add $4,%esp
 	.byte 0x28				# iret
-	.byte 0x00,0x00,0x00,0x00,0x00,0x00	# lock btsl $0,4(%esi)
+	.byte 0x00,0x00,0x00,0x00		# movb $1,4(%esi)
 	.byte 0x00,0x00				# jmp 11b
 
 # Hypervisor uses this for application faults while it executes.
```
```diff
--- a/xenolinux-2.4.25-sparse/arch/xen/kernel/evtchn.c	Wed Mar 31 16:15:50 2004 +0000
+++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/evtchn.c	Wed Mar 31 21:42:18 2004 +0000
@@ -50,8 +50,10 @@ void evtchn_do_upcall(struct pt_regs *re
 
     local_irq_save(flags);
 
-    while ( synch_test_and_clear_bit(0, &s->evtchn_upcall_pending) )
+    while ( s->vcpu_data[0].evtchn_upcall_pending )
     {
+        s->vcpu_data[0].evtchn_upcall_pending = 0;
+        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
         l1 = xchg(&s->evtchn_pending_sel, 0);
         while ( (l1i = ffs(l1)) != 0 )
         {
```
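This loop is the consumer half of the ordering contract: the per-VCPU flag is retired before the selector word is drained. Restating the two steps with labels (a sketch, with `s`, `l1` and `xchg` as in the file above) shows why no event is lost; a producer that runs between step 1 and step 2 leaves the flag set again, forcing the outer loop to take another pass:

```c
while ( s->vcpu_data[0].evtchn_upcall_pending )
{
    s->vcpu_data[0].evtchn_upcall_pending = 0;   /* step 1: acknowledge flag */
    l1 = xchg(&s->evtchn_pending_sel, 0);        /* step 2: drain selector   */
    /* ... dispatch one handler per set bit in l1 ... */
}
```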
```diff
--- a/xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h	Wed Mar 31 16:15:50 2004 +0000
+++ b/xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h	Wed Mar 31 21:42:18 2004 +0000
@@ -42,10 +42,12 @@ static inline void unmask_evtchn(int por
      * a real IO-APIC we 'lose the interrupt edge' if the channel is masked.
      */
     if ( synch_test_bit         (port,    &s->evtchn_pending[0]) &&
-         !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) &&
-         !synch_test_and_set_bit(0,       &s->evtchn_upcall_pending) &&
-         !synch_test_bit        (0,       &s->evtchn_upcall_mask) )
-        evtchn_do_upcall(NULL);
+         !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
+    {
+        s->vcpu_data[0].evtchn_upcall_pending = 1;
+        if ( !s->vcpu_data[0].evtchn_upcall_mask )
+            evtchn_do_upcall(NULL);
+    }
 }
 
 static inline void clear_evtchn(int port)
```
```diff
--- a/xenolinux-2.4.25-sparse/include/asm-xen/system.h	Wed Mar 31 16:15:50 2004 +0000
+++ b/xenolinux-2.4.25-sparse/include/asm-xen/system.h	Wed Mar 31 21:42:18 2004 +0000
@@ -302,42 +302,55 @@ static inline unsigned long __cmpxchg(vo
 
 #define safe_halt() ((void)0)
 
-/*
- * Note the use of synch_*_bit() operations in the following. These operations
- * ensure correct serialisation of checks and updates w.r.t. Xen executing on
- * a different CPU.
+/*
+ * The use of 'barrier' in the following reflects their use as local-lock
+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
+ * critical operations are executed. All critical operations must complete
+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
+ * includes these barriers, for example.
  */
 
 #define __cli()                                                               \
 do {                                                                          \
-    synch_set_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask);            \
+    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;              \
+    barrier();                                                                \
 } while (0)
 
 #define __sti()                                                               \
 do {                                                                          \
     shared_info_t *_shared = HYPERVISOR_shared_info;                          \
-    synch_clear_bit(0, &_shared->evtchn_upcall_mask);                         \
-    if ( unlikely(synch_test_bit(0, &_shared->evtchn_upcall_pending)) )       \
+    barrier();                                                                \
+    _shared->vcpu_data[0].evtchn_upcall_mask = 0;                             \
+    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )              \
         evtchn_do_upcall(NULL);                                               \
 } while (0)
 
 #define __save_flags(x)                                                       \
 do {                                                                          \
-    (x) = synch_test_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask);     \
+    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
 } while (0)
 
-#define __restore_flags(x) do { if (x) __cli(); else __sti(); } while (0)
+#define __restore_flags(x)                                                    \
+do {                                                                          \
+    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
+    barrier();                                                                \
+    if ( (_shared->vcpu_data[0].evtchn_upcall_mask = x) == 0 )                \
+        if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )          \
+            evtchn_do_upcall(NULL);                                           \
+} while (0)
 
 #define __save_and_cli(x)                                                     \
 do {                                                                          \
-    (x) = synch_test_and_set_bit(                                             \
-        0, &HYPERVISOR_shared_info->evtchn_upcall_mask);                      \
+    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
+    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;              \
+    barrier();                                                                \
 } while (0)
 
 #define __save_and_sti(x)                                                     \
 do {                                                                          \
-    (x) = synch_test_and_clear_bit(                                           \
-        0, &HYPERVISOR_shared_info->evtchn_upcall_mask);                      \
+    barrier();                                                                \
+    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
+    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 0;              \
 } while (0)
 
 #define local_irq_save(x) __save_and_cli(x)
```
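Guest code uses these macros exactly like the native IRQ-flag macros they shadow: the mask byte stands in for EFLAGS.IF. An illustrative (hypothetical) critical section, not taken from the tree:

```c
/* Illustrative use of the new flag macros. __save_and_cli() records the
 * old mask and sets it (nestable); __restore_flags() writes the old value
 * back and, if delivery is thereby re-enabled and an event arrived in the
 * meantime, runs the pending upcall immediately. */
static int shared_counter;   /* hypothetical state also touched by an upcall */

static void bump_counter(void)
{
    unsigned long flags;

    __save_and_cli(flags);   /* mask upcalls; barrier() stops reordering in */
    shared_counter++;        /* protected region: no reentrant upcall here  */
    __restore_flags(flags);  /* unmask (maybe) and catch up on lost events  */
}
```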