csize = vgic_v2_hw.csize;
vbase = vgic_v2_hw.vbase;
}
+ else if ( is_domain_direct_mapped(d) )
+ {
+ /*
+ * For non-dom0 direct_mapped guests we only map an 8kB CPU
+ * interface but we make sure it is at a location occupied by
+ * the physical GIC in the host device tree.
+ *
+ * We need to add an offset to the virtual CPU interface base
+ * address when the GIC is aliased to get an 8kB contiguous
+ * region.
+ */
+ d->arch.vgic.dbase = vgic_v2_hw.dbase;
+ d->arch.vgic.cbase = vgic_v2_hw.cbase + vgic_v2_hw.aliased_offset;
+ csize = GUEST_GICC_SIZE;
+ vbase = vgic_v2_hw.vbase + vgic_v2_hw.aliased_offset;
+ }
else
{
d->arch.vgic.dbase = GUEST_GICD_BASE;
csize = gic_v2_hw_data.csize;
vbase = gic_v2_hw_data.vbase;
}
+ else if ( is_domain_direct_mapped(d) )
+ {
+ d->arch.vgic.vgic_dist_base = gic_v2_hw_data.dbase;
+ /*
+ * For non-dom0 direct_mapped guests we only map an 8kB CPU
+ * interface but we make sure it is at a location occupied by
+ * the physical GIC in the host device tree.
+ *
+ * We need to add an offset to the virtual CPU interface base
+ * address when the GIC is aliased to get an 8kB contiguous
+ * region.
+ */
+ d->arch.vgic.vgic_cpu_base = gic_v2_hw_data.cbase +
+ gic_v2_hw_data.aliased_offset;
+ csize = GUEST_GICC_SIZE;
+ vbase = gic_v2_hw_data.vbase + gic_v2_hw_data.aliased_offset;
+ }
else
{
d->arch.vgic.vgic_dist_base = GUEST_GICD_BASE;
static inline paddr_t vgic_cpu_base(struct vgic_dist *vgic)
{
- return GUEST_GICC_BASE;
+ return vgic->vgic_cpu_base;
}
static inline paddr_t vgic_dist_base(struct vgic_dist *vgic)
{
- return GUEST_GICD_BASE;
+ return vgic->vgic_dist_base;
}
static inline unsigned int vgic_rdist_nr(struct vgic_dist *vgic)
static inline paddr_t vgic_cpu_base(struct vgic_dist *vgic)
{
- return GUEST_GICC_BASE;
+ return vgic->cbase;
}
static inline paddr_t vgic_dist_base(struct vgic_dist *vgic)
{
- return GUEST_GICD_BASE;
+ return vgic->dbase;
}
#ifdef CONFIG_GICV3