while ( pending )
{
- unsigned int l1 = find_first_set_bit(pending);
+ unsigned int l1 = ffsl(pending) - 1;
unsigned long evtchn = xchg(&XEN_shared_info->evtchn_pending[l1], 0);
__clear_bit(l1, &pending);
evtchn &= ~XEN_shared_info->evtchn_mask[l1];
while ( evtchn )
{
- unsigned int port = find_first_set_bit(evtchn);
+ unsigned int port = ffsl(evtchn) - 1;
__clear_bit(port, &evtchn);
port += l1 * BITS_PER_LONG;
order = get_order_from_pages(end - start + 1);
order = min(order ? order - 1 : 0, max_order);
/* The order allocated and populated must be aligned to the address. */
- order = min(order, start ? find_first_set_bit(start) : MAX_ORDER);
+ order = min(order, start ? ffsl(start) - 1U : MAX_ORDER + 0U);
page = alloc_domheap_pages(d, order, dom0_memflags | MEMF_no_scrub);
if ( page == NULL )
{
* enabled pick the first irq.
*/
timer_config(h, tn) |=
- MASK_INSR(find_first_set_bit(timer_int_route_cap(h, tn)),
+ MASK_INSR(ffs(timer_int_route_cap(h, tn)) - 1,
HPET_TN_ROUTE);
}
{
bool active;
- i = find_first_set_bit(new_val);
+ i = ffsl(new_val) - 1;
if ( i >= HPET_TIMER_NUM )
break;
__clear_bit(i, &new_val);
/* stop/start timers whos state was changed by this write. */
while (stop_timers)
{
- i = find_first_set_bit(stop_timers);
+ i = ffsl(stop_timers) - 1;
__clear_bit(i, &stop_timers);
hpet_stop_timer(h, i, guest_time);
}
while (start_timers)
{
- i = find_first_set_bit(start_timers);
+ i = ffsl(start_timers) - 1;
__clear_bit(i, &start_timers);
hpet_set_timer(h, i, guest_time);
}
/* Step 1: Reduce markers in lower numbered entries. */
while ( i )
{
- b = find_first_set_bit(i);
+ b = ffs(i) - 1;
i &= ~(1U << b);
if ( GET_MARKER(pt[i]) <= b )
break;
{
unsigned int cpu;
- vcpu_id = find_first_set_bit(vmask);
+ vcpu_id = ffsl(vmask) - 1;
vmask &= ~(1UL << vcpu_id);
vcpu_id += vcpu_bias;
if ( (vcpu_id >= d->max_vcpus) )
p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
{
unsigned long left = 1UL << order, ret = 0;
- unsigned int chunk_order = find_first_set_bit(gfn_x(gfn) | left);
+ unsigned int chunk_order = ffsl(gfn_x(gfn) | left) - 1;
do {
ret += decrease_reservation(d, gfn, chunk_order);
unsigned int order)
{
unsigned long left = 1UL << order;
- unsigned int chunk_order = find_first_set_bit(gfn | left);
+ unsigned int chunk_order = ffsl(gfn | left) - 1;
int rc;
if ( !paging_mode_translate(d) )
if ( unlikely(!avail[nid]) )
{
bool use_tail = IS_ALIGNED(s, 1UL << MAX_ORDER) &&
- (find_first_set_bit(e) <= find_first_set_bit(s));
+ (ffsl(e) <= ffsl(s));
unsigned long n;
n = init_node_heap(nid, s, nr_pages, &use_tail);
|| cpu_is_offline(cpu) )
break;
- i = find_first_set_bit(pending);
+ i = ffsl(pending) - 1;
clear_bit(i, &softirq_pending(cpu));
(*softirq_handlers[i])();
}
ASSERT(!pde->u);
if ( pde > table )
- ASSERT(pde->ign0 == find_first_set_bit(pde - table));
+ ASSERT(pde->ign0 == ffsl(pde - table) - 1);
else
ASSERT(pde->ign0 == CONTIG_LEVEL_SHIFT);
{
unsigned long res = dfn_x(dfn) | mfn_x(mfn);
unsigned long sizes = hd->platform_ops->page_sizes;
- unsigned int bit = find_first_set_bit(sizes), order = 0;
+ unsigned int bit = ffsl(sizes) - 1, order = 0;
ASSERT(bit == PAGE_SHIFT);
{
unsigned long mask;
- bit = find_first_set_bit(sizes);
+ bit = ffsl(sizes) - 1;
mask = (1UL << bit) - 1;
if ( nr <= mask || (res & mask) )
break;
if ( contig_mask )
{
/* See pt-contig-markers.h for a description of the marker scheme. */
- unsigned int i, shift = find_first_set_bit(contig_mask);
+ unsigned int i, shift = ffsl(contig_mask) - 1;
ASSERT((CONTIG_LEVEL_SHIFT & (contig_mask >> shift)) == CONTIG_LEVEL_SHIFT);
for ( i = 4; i < PAGE_SIZE / sizeof(*p); i += 4 )
{
- p[i + 0] = (find_first_set_bit(i) + 0ULL) << shift;
+ p[i + 0] = (ffsl(i) - 1ULL) << shift;
p[i + 1] = 0;
p[i + 2] = 1ULL << shift;
p[i + 3] = 0;