offset &= ~(NR_TARGETS_PER_ITARGETSR - 1);
for ( i = 0; i < NR_TARGETS_PER_ITARGETSR; i++, offset++ )
- reg |= (1 << rank->vcpu[offset]) << (i * NR_BITS_PER_TARGET);
+ reg |= (1 << read_atomic(&rank->vcpu[offset])) << (i * NR_BITS_PER_TARGET);
return reg;
}
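Note on the hunk above: the rank lock is no longer what protects this read; the change relies on single-copy atomicity instead, so each element of rank->vcpu[] must be fetched with one aligned memory access the compiler cannot split or replay. An illustrative sketch of that property, assuming the byte-sized rank->vcpu[] elements this series uses (this is not Xen's actual read_atomic()/write_atomic(), which dispatch on sizeof(*p)):

    /*
     * Illustrative only, not Xen's definition: a single-copy-atomic
     * access to a byte-sized field.  An aligned byte load/store is
     * single-copy atomic on ARMv7/ARMv8; the volatile cast keeps the
     * compiler from splitting, merging, or re-reading the access.
     */
    static inline uint8_t vcpu_id_read(const uint8_t *p)
    {
        return *(const volatile uint8_t *)p;
    }

    static inline void vcpu_id_write(uint8_t *p, uint8_t id)
    {
        *(volatile uint8_t *)p = id;
    }

For a one-byte field the load is naturally atomic on these architectures; the helper's job is to force exactly one access and to flag the lock-free intent at every site that touches rank->vcpu[].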
/* The vCPU ID always starts from 0 */
new_target--;
- old_target = rank->vcpu[offset];
+ old_target = read_atomic(&rank->vcpu[offset]);
/* Only migrate the vIRQ if the target vCPU has changed */
if ( new_target != old_target )
virq);
}
- rank->vcpu[offset] = new_target;
+ write_atomic(&rank->vcpu[offset], new_target);
}
}
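The write side still runs under the rank lock, so write_atomic() here is not ordering writers against each other; it pairs with the lock-free readers introduced later in the series. The publication order in the hunk matters and is worth restating (a condensed, hypothetical rendering of the same logic, rank lock held):

    /*
     * Migrate first, publish second: a lock-free reader that still
     * observes the old target finds a vCPU that remains valid for
     * this vIRQ; it can never see the new target before the
     * migration bookkeeping has been done.
     */
    if ( new_target != old_target )
        vgic_migrate_irq(d->vcpu[old_target], d->vcpu[new_target], virq);

    write_atomic(&rank->vcpu[offset], new_target);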
/* Get the index in the rank */
offset &= INTERRUPT_RANK_MASK;
- return vcpuid_to_vaffinity(rank->vcpu[offset]);
+ return vcpuid_to_vaffinity(read_atomic(&rank->vcpu[offset]));
}
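vgic_fetch_irouter() translates the stored vCPU ID back into the MPIDR-style affinity value the guest reads from GICD_IROUTER. Roughly, the low four bits of the ID become Aff0 and the remaining bits Aff1; a sketch of that mapping follows (illustrative, with hypothetical naming; Xen's vcpuid_to_vaffinity() is the authoritative version and may differ in detail):

    /*
     * Sketch: split a vCPU ID into GICv3 affinity fields.  GICv3
     * leaves only 16 usable values in Aff0, so the ID is split as
     * Aff0 = id[3:0] (bits [7:0] of the result) and
     * Aff1 = id[11:4] (bits [15:8] of the result).
     */
    static inline uint64_t example_vcpuid_to_vaffinity(unsigned int vcpuid)
    {
        uint64_t vaff = vcpuid & 0xf;                  /* Aff0 */

        vaff |= ((uint64_t)(vcpuid >> 4) & 0xff) << 8; /* Aff1 */

        return vaff;
    }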
/*
offset &= virq & INTERRUPT_RANK_MASK;
new_vcpu = vgic_v3_irouter_to_vcpu(d, irouter);
- old_vcpu = d->vcpu[rank->vcpu[offset]];
+ old_vcpu = d->vcpu[read_atomic(&rank->vcpu[offset])];
/*
* From the spec (see 8.9.13 in IHI 0069A), any write with an
if ( new_vcpu != old_vcpu )
vgic_migrate_irq(old_vcpu, new_vcpu, virq);
- rank->vcpu[offset] = new_vcpu->vcpu_id;
+ write_atomic(&rank->vcpu[offset], new_vcpu->vcpu_id);
}
static inline bool vgic_reg64_check_access(struct hsr_dabt dabt)
rank->index = index;
for ( i = 0; i < NR_INTERRUPT_PER_RANK; i++ )
- rank->vcpu[i] = vcpu;
+ write_atomic(&rank->vcpu[i], vcpu);
}
int domain_vgic_init(struct domain *d, unsigned int nr_spis)
return 0;
}
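vgic_rank_init() runs before the domain's vCPUs can race on the rank, so the atomic store there is not strictly required; using it anyway keeps every access to rank->vcpu[] behind the same accessors. The scheme also assumes a vCPU ID always fits the element type, which is the kind of invariant best enforced at build time; Xen's vgic_rank_init() carries a guard of roughly this shape:

    /*
     * If rank->vcpu[0] cannot represent every possible vCPU ID, fail
     * the build instead of letting the atomic accessors silently
     * truncate (shape of the check only; see vgic_rank_init()).
     */
    BUILD_BUG_ON((1 << (sizeof(rank->vcpu[0]) * 8)) < MAX_VIRT_CPUS);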
-/* The function should be called by rank lock taken. */
-static struct vcpu *__vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
-{
- struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
-
- ASSERT(spin_is_locked(&rank->lock));
-
- return v->domain->vcpu[rank->vcpu[virq & INTERRUPT_RANK_MASK]];
-}
-
-/* takes the rank lock */
struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
{
- struct vcpu *v_target;
struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
- unsigned long flags;
-
- vgic_lock_rank(v, rank, flags);
- v_target = __vgic_get_target_vcpu(v, virq);
- vgic_unlock_rank(v, rank, flags);
-
- return v_target;
+ int target = read_atomic(&rank->vcpu[virq & INTERRUPT_RANK_MASK]);
+ return v->domain->vcpu[target];
}
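This hunk is the heart of the change. The lock-held __vgic_get_target_vcpu() variant and its ASSERT disappear, and the public vgic_get_target_vcpu() becomes a plain atomic read: the target a reader observes may be momentarily stale, but it is always a complete, valid vCPU ID for the domain, never a torn value. Distilled into a hypothetical pattern (names invented, not literal Xen code):

    /* Writers serialize on the rank lock; readers take no lock. */
    struct rank_like {
        spinlock_t lock;        /* protects writers only           */
        uint8_t    target[32];  /* one byte-sized vCPU ID per vIRQ */
    };

    static void set_target(struct rank_like *r, unsigned int idx,
                           uint8_t id)
    {
        ASSERT(spin_is_locked(&r->lock));
        write_atomic(&r->target[idx], id);
    }

    static uint8_t get_target(struct rank_like *r, unsigned int idx)
    {
        /* No lock: single-copy atomicity is the only requirement. */
        return read_atomic(&r->target[idx]);
    }

A side effect shows in the following hunks: vgic_disable_irqs() and vgic_enable_irqs(), which run with the rank lock already held, can now call the public helper directly without self-deadlocking, so the underscore variant has no remaining users.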
static int vgic_get_virq_priority(struct vcpu *v, unsigned int virq)
while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
irq = i + (32 * n);
- v_target = __vgic_get_target_vcpu(v, irq);
+ v_target = vgic_get_target_vcpu(v, irq);
p = irq_to_pending(v_target, irq);
clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
gic_remove_from_queues(v_target, irq);
}
}
- v_target = __vgic_get_target_vcpu(v, irq);
+ v_target = vgic_get_target_vcpu(v, irq);
p = irq_to_pending(v_target, irq);
set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
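Finally, a note on what the callers still guarantee. Once vgic_enable_irqs() has resolved the target without the rank lock, per-interrupt state is still serialized by the target vCPU's own vgic lock (the spin_lock_irqsave() above), so the atomic read only has to return a coherent vCPU ID, not a stable routing decision. A condensed caller-side sketch under those assumptions (the wrapper function is hypothetical; the calls are those visible in the hunks above):

    static void enable_one(struct vcpu *v, unsigned int irq)
    {
        struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
        struct pending_irq *p = irq_to_pending(v_target, irq);
        unsigned long flags;

        set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);

        /* Pending-state manipulation stays under the target's lock. */
        spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
        /* ... queue handling elided ... */
        spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
    }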