v->vcpu_info_mfn = page_to_mfn(page);
/* Set new vcpu_info pointer /before/ setting pending flags. */
- wmb();
+ smp_wmb();
/*
 * Mark everything as being pending just to make sure nothing gets
 * lost.  The domain will get a spurious event, but it can cope.
 */
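(Aside, not part of the patch: every smp_wmb() conversion in this series is
the write half of the same publish pattern. A minimal sketch with
hypothetical names, shared, payload, ready and consume(); note that in Xen's
x86 headers smp_wmb() is just barrier(), so the write side costs nothing at
run time:)

    /* Producer: make the payload visible before the flag announcing it. */
    shared->payload = value;
    smp_wmb();                      /* payload before flag */
    shared->ready = 1;

    /* Consumer: observe the flag before trusting the payload. */
    if ( shared->ready )
    {
        smp_rmb();                  /* flag before payload */
        consume(shared->payload);
    }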
/* Install vcpu array /then/ update max_vcpus. */
d->vcpu = vcpus;
- wmb();
+ smp_wmb();
d->max_vcpus = max;
}
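(Read-side pairing for this hunk, a sketch rather than code from the patch:
a CPU that bound-checks against the new max_vcpus must not then load a stale
d->vcpu, so the two loads need the mirror-image barrier:)

    if ( vcpuid < d->max_vcpus )    /* load max_vcpus first...        */
    {
        smp_rmb();                  /* ...then the array pointer      */
        v = d->vcpu[vcpuid];        /* d->vcpu is guaranteed valid    */
    }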
/* Make sure guest sees status update before checking if flags are
still valid */
- mb();
+ smp_mb();
scombo.word = *(u32 *)shah;
barrier();
guest_physmap_add_page(e, sha->full_page.frame, mfn, 0);
sha->full_page.frame = mfn;
}
- wmb();
+ smp_wmb();
shared_entry_header(e->grant_table, gop.ref)->flags |=
GTF_transfer_completed;
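(The consumer here is the receiving guest: it polls the shared entry's flags
and, once GTF_transfer_completed shows up, reads the transferred frame.
Guest-side sketch, hypothetical code outside this patch:)

    while ( !(shared_entry->flags & GTF_transfer_completed) )
        cpu_relax();
    smp_rmb();                      /* flags before frame */
    new_frame = shared_entry->frame;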
ASSERT(page_get_owner(&pg[i]) == NULL);
ASSERT((pg[i].count_info & ~(PGC_allocated | 1)) == 0);
page_set_owner(&pg[i], d);
- wmb(); /* Domain pointer must be visible before updating refcnt. */
+ smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
pg[i].count_info = PGC_allocated | 1;
page_list_add_tail(&pg[i], &d->page_list);
}
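(The invariant this hunk maintains: once count_info shows the page as
allocated with a live reference, any CPU reaching the page must also see its
owner. A reader-side sketch; real callers go through get_page(), which
validates the owner against the expected domain:)

    if ( pg->count_info & PGC_allocated )
    {
        smp_rmb();                  /* count_info before owner */
        ASSERT(page_get_owner(pg) != NULL);
    }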
if ( call_data.wait )
{
(*func)(info);
- mb();
+ smp_mb();
cpumask_clear_cpu(cpu, &call_data.selected);
}
else
{
- mb();
+ smp_mb();
cpumask_clear_cpu(cpu, &call_data.selected);
(*func)(info);
}
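(The asymmetric placement is deliberate, and both arms need the full
smp_mb(). With call_data.wait set, the initiator spins until its bit drops
and then assumes the handler's side effects are visible, so func()'s stores
must be ordered before the clear. Without wait, clearing the bit hands
call_data back for reuse, so the earlier loads of func and info must
complete before the clear. Condensed:)

    /* wait: handler's stores strictly before the completion signal. */
    (*func)(info);
    smp_mb();
    cpumask_clear_cpu(cpu, &call_data.selected);

    /* nowait: loads from call_data strictly before releasing it. */
    smp_mb();
    cpumask_clear_cpu(cpu, &call_data.selected);
    (*func)(info);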
u64 loop = 0;
check_barrier(&lock->debug);
- do { mb(); loop++;} while ( _raw_spin_is_locked(&lock->raw) );
+ do { smp_mb(); loop++;} while ( _raw_spin_is_locked(&lock->raw) );
if ((loop > 1) && lock->profile)
{
lock->profile->time_block += NOW() - block;
}
#else
check_barrier(&lock->debug);
- do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
+ do { smp_mb(); } while ( _raw_spin_is_locked(&lock->raw) );
#endif
- mb();
+ smp_mb();
}
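(For context, this is _spin_barrier(): it waits out whichever critical
section currently holds the lock, and the trailing smp_mb() makes the
caller's subsequent loads see everything the last holder wrote. A usage
sketch, loosely modeled on how evtchn_destroy() drains d->event_lock:)

    d->is_dying = DOMDYING_dying;   /* state change visible to holders */
    spin_barrier(&d->event_lock);   /* wait for the current holder     */
    /* No critical section that began before the state change can still
     * be running past this point. */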
int _spin_trylock_recursive(spinlock_t *lock)
return -EFAULT;
}
}
- mb();
+ smp_mb();
if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset && cli_va )
tmh_copy_page(tmem_va, cli_va);
else if ( (tmem_offset+len <= PAGE_SIZE) &&
return 0;
else if ( copy_from_guest(scratch, clibuf, PAGE_SIZE) )
return -EFAULT;
- mb();
+ smp_mb();
ret = lzo1x_1_compress(cli_va ?: scratch, PAGE_SIZE, dmem, out_len, wmem);
ASSERT(ret == LZO_E_OK);
*out_va = dmem;
unmap_domain_page(tmem_va);
if ( cli_va )
cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
- mb();
+ smp_mb();
return rc;
}
cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
else if ( copy_to_guest(clibuf, scratch, PAGE_SIZE) )
return -EFAULT;
- mb();
+ smp_mb();
return 1;
}
if ( len < PAGE_SIZE )
memset((char *)cli_va+len,0,PAGE_SIZE-len);
cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
- mb();
+ smp_mb();
return 1;
}
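(The tmem conversions are mechanical, and they illustrate the rule the whole
series applies: these barriers order plain memory accesses between CPUs,
guest pages and hypervisor buffers, with no device access anywhere nearby,
so the CPU-only smp_* forms give the same guarantee at potentially lower
cost. As a rule of thumb:)

    /* CPU vs. CPU ordering of ordinary memory: the smp_* forms suffice. */
    smp_mb();  smp_rmb();  smp_wmb();

    /* Ordering against a device (MMIO, DMA descriptors): keep the
     * mandatory forms. */
    mb();  rmb();  wmb();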
opt_tbuf_size = pages;
printk("xentrace: initialised\n");
- wmb(); /* above must be visible before tb_init_done flag set */
+ smp_wmb(); /* above must be visible before tb_init_done flag set */
tb_init_done = 1;
return 0;
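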
int i;
tb_init_done = 0;
- wmb();
+ smp_wmb();
/* Clear any lost-record info so we don't get phantom lost records next time we
* start tracing. Grab the lock to make sure we're not racing anyone. After this
* hypercall returns, no more records should be placed into the buffers. */
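(Enable and disable are ordered symmetrically: enabling publishes the
buffers before raising tb_init_done, disabling lowers the flag before
touching buffer state, so a producer can never find the flag up while the
buffers are in flux. Side by side, with hypothetical helper names:)

    /* Enable: buffers first, flag second. */
    setup_trace_buffers();
    smp_wmb();
    tb_init_done = 1;

    /* Disable: flag first, buffer bookkeeping second. */
    tb_init_done = 0;
    smp_wmb();
    reset_lost_records();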
memcpy(next_page, (char *)rec + remaining, rec_size - remaining);
}
- wmb();
+ smp_wmb();
next += rec_size;
if ( next >= 2*data_size )
return;
/* Read tb_init_done /before/ t_bufs. */
- rmb();
+ smp_rmb();
spin_lock_irqsave(&this_cpu(t_lock), flags);
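(This smp_rmb() is the read half of the smp_wmb() added before
"tb_init_done = 1" above: a producer that sees the flag raised must also see
fully initialised trace buffers. The pair, condensed from the two hunks:)

    /* Writer side (earlier hunk): buffers before flag. */
    smp_wmb();
    tb_init_done = 1;

    /* Reader side (this hunk): flag before buffers. */
    if ( !tb_init_done )
        return;
    smp_rmb();
    /* ... safe to dereference this_cpu(t_bufs) from here on ... */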
for ( i = conringc ; i != conringp; i++ )
ring[i & (opt_conring_size - 1)] = conring[i & (conring_size - 1)];
conring = ring;
- wmb(); /* Allow users of console_force_unlock() to see larger buffer. */
+ smp_wmb(); /* Allow users of console_force_unlock() to see larger buffer. */
conring_size = opt_conring_size;
spin_unlock_irq(&console_lock);
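(Here the barrier prevents an out-of-bounds access rather than a stale read:
unlocked readers, such as the console_force_unlock() path named in the
comment, mask their index with conring_size, so a CPU that observes the
enlarged size must already see the pointer to the larger buffer. A
hypothetical unlocked reader:)

    idx = c & (conring_size - 1);   /* load the size first            */
    smp_rmb();                      /* size before buffer pointer     */
    ch = conring[idx];              /* never indexes past the buffer  */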
if ( condition ) \
break; \
set_bit(_VPF_blocked_in_xen, &current->pause_flags); \
- mb(); /* set blocked status /then/ re-evaluate condition */ \
+ smp_mb(); /* set blocked status /then/ re-evaluate condition */ \
if ( condition ) \
{ \
clear_bit(_VPF_blocked_in_xen, &current->pause_flags); \
do { \
set_bit(_VPF_blocked_in_xen, &current->pause_flags); \
raise_softirq(SCHEDULE_SOFTIRQ); \
- mb(); /* set blocked status /then/ caller does his work */ \
+ smp_mb(); /* set blocked status /then/ caller does his work */ \
} while ( 0 )
#endif /* __XEN_EVENT_H__ */
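(Both macros are the sleeping side of the classic flag/condition handshake,
and both need the full smp_mb() because the critical sequence is a store,
setting _VPF_blocked_in_xen, followed by a load, re-reading the condition.
The waking side mirrors it; a sketch, with make_condition_true() standing in
for the real event:)

    make_condition_true();          /* publish the condition           */
    smp_mb();                       /* condition before flag check     */
    if ( test_and_clear_bit(_VPF_blocked_in_xen, &v->pause_flags) )
        vcpu_wake(v);               /* the sleeper cannot be missed    */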
if ( prev )
{
newnode->next = prev->next;
- wmb();
+ smp_wmb();
prev->next = newnode;
}
else
{
newnode->next = s->htable[hvalue];
- wmb();
+ smp_wmb();
s->htable[hvalue] = newnode;
}