return vgic_handle_cfg_reg(reg, mmio, offset);
}
+/* We don't trigger any actions here, just store the register value */
+static bool handle_mmio_propbaser_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int mode = ACCESS_READ_VALUE;
+
+ /* Storing a value with LPIs already enabled is undefined */
+ mode |= dist->lpis_enabled ? ACCESS_WRITE_IGNORED : ACCESS_WRITE_VALUE;
+ vgic_handle_base_register(vcpu, mmio, offset, &dist->propbaser, mode);
+
+ return false;
+}
+
+/* We don't trigger any actions here, just store the register value */
+static bool handle_mmio_pendbaser_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
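+ /* mmio->private carries the VCPU whose redistributor is being accessed */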
+ struct kvm_vcpu *rdvcpu = mmio->private;
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ int mode = ACCESS_READ_VALUE;
+
+ /* Storing a value with LPIs already enabled is undefined */
+ mode |= dist->lpis_enabled ? ACCESS_WRITE_IGNORED : ACCESS_WRITE_VALUE;
+ vgic_handle_base_register(vcpu, mmio, offset,
+ &dist->pendbaser[rdvcpu->vcpu_id], mode);
+
+ return false;
+}
+
#define SGI_base(x) ((x) + SZ_64K)
static const struct vgic_io_range vgic_redist_ranges[] = {
.bits_per_irq = 0,
.handle_mmio = handle_mmio_raz_wi,
},
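+ /* GICR_PENDBASER and GICR_PROPBASER are 64-bit registers, hence len = 0x08 */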
+ {
+ .base = GICR_PENDBASER,
+ .len = 0x08,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_pendbaser_redist,
+ },
+ {
+ .base = GICR_PROPBASER,
+ .len = 0x08,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_propbaser_redist,
+ },
{
.base = GICR_IDREGS,
.len = 0x30,
}
}
+/* handle a 64-bit register access */
+void vgic_handle_base_register(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, u64 *basereg,
+ int mode)
+{
+ u32 reg;
+ u64 breg;
+
+ switch (offset & ~3) {
+ case 0x00:
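+ /* access to the lower 32 bits of the 64-bit base register */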
+ breg = *basereg;
+ reg = lower_32_bits(breg);
+ vgic_reg_access(mmio, &reg, offset & 3, mode);
+ if (mmio->is_write && (mode & ACCESS_WRITE_VALUE)) {
+ breg &= GENMASK_ULL(63, 32);
+ breg |= reg;
+ *basereg = breg;
+ }
+ break;
+ case 0x04:
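+ /* access to the upper 32 bits of the 64-bit base register */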
+ breg = *basereg;
+ reg = upper_32_bits(breg);
+ vgic_reg_access(mmio, &reg, offset & 3, mode);
+ if (mmio->is_write && (mode & ACCESS_WRITE_VALUE)) {
+ breg = lower_32_bits(breg);
+ breg |= (u64)reg << 32;
+ *basereg = breg;
+ }
+ break;
+ }
+}
+
bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
phys_addr_t offset)
{
phys_addr_t offset, int mode);
bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
phys_addr_t offset);
+void vgic_handle_base_register(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, u64 *basereg,
+ int mode);
static inline
u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)