static int do_completion_wait(struct domain *d, cmd_entry_t *cmd)
{
- bool_t com_wait_int_en, com_wait_int, i, s;
+ bool com_wait_int, i, s;
struct guest_iommu *iommu;
unsigned long gfn;
p2m_type_t p2mt;
unmap_domain_page(vaddr);
}
- com_wait_int_en = iommu_get_bit(iommu->reg_ctrl.lo,
- IOMMU_CONTROL_COMP_WAIT_INT_SHIFT);
com_wait_int = iommu_get_bit(iommu->reg_status.lo,
IOMMU_STATUS_COMP_WAIT_INT_SHIFT);
- if ( com_wait_int_en && com_wait_int )
+ if ( iommu->reg_ctrl.com_wait_int_en && com_wait_int )
guest_iommu_deliver_msi(d);
return 0;
return;
}
-static int guest_iommu_write_ctrl(struct guest_iommu *iommu, uint64_t newctrl)
+static int guest_iommu_write_ctrl(struct guest_iommu *iommu, uint64_t val)
{
- bool_t cmd_en, event_en, iommu_en, ppr_en, ppr_log_en;
- bool_t cmd_en_old, event_en_old, iommu_en_old;
- bool_t cmd_run;
+ union amd_iommu_control newctrl = { .raw = val };
- iommu_en = iommu_get_bit(newctrl,
- IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT);
- iommu_en_old = iommu_get_bit(iommu->reg_ctrl.lo,
- IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT);
-
- cmd_en = iommu_get_bit(newctrl,
- IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT);
- cmd_en_old = iommu_get_bit(iommu->reg_ctrl.lo,
- IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT);
- cmd_run = iommu_get_bit(iommu->reg_status.lo,
- IOMMU_STATUS_CMD_BUFFER_RUN_SHIFT);
- event_en = iommu_get_bit(newctrl,
- IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT);
- event_en_old = iommu_get_bit(iommu->reg_ctrl.lo,
- IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT);
-
- ppr_en = iommu_get_bit(newctrl,
- IOMMU_CONTROL_PPR_ENABLE_SHIFT);
- ppr_log_en = iommu_get_bit(newctrl,
- IOMMU_CONTROL_PPR_LOG_ENABLE_SHIFT);
-
- if ( iommu_en )
+ if ( newctrl.iommu_en )
{
guest_iommu_enable(iommu);
guest_iommu_enable_dev_table(iommu);
}
- if ( iommu_en && cmd_en )
+ if ( newctrl.iommu_en && newctrl.cmd_buf_en )
{
guest_iommu_enable_ring_buffer(iommu, &iommu->cmd_buffer,
sizeof(cmd_entry_t));
tasklet_schedule(&iommu->cmd_buffer_tasklet);
}
- if ( iommu_en && event_en )
+ if ( newctrl.iommu_en && newctrl.event_log_en )
{
guest_iommu_enable_ring_buffer(iommu, &iommu->event_log,
sizeof(event_entry_t));
guest_iommu_clear_status(iommu, IOMMU_STATUS_EVENT_OVERFLOW_SHIFT);
}
- if ( iommu_en && ppr_en && ppr_log_en )
+ if ( newctrl.iommu_en && newctrl.ppr_en && newctrl.ppr_log_en )
{
guest_iommu_enable_ring_buffer(iommu, &iommu->ppr_log,
sizeof(ppr_entry_t));
guest_iommu_clear_status(iommu, IOMMU_STATUS_PPR_LOG_OVERFLOW_SHIFT);
}
- if ( iommu_en && cmd_en_old && !cmd_en )
+ if ( newctrl.iommu_en && iommu->reg_ctrl.cmd_buf_en &&
+ !newctrl.cmd_buf_en )
{
/* Disable iommu command processing */
tasklet_kill(&iommu->cmd_buffer_tasklet);
}
- if ( event_en_old && !event_en )
+ if ( iommu->reg_ctrl.event_log_en && !newctrl.event_log_en )
guest_iommu_clear_status(iommu, IOMMU_STATUS_EVENT_LOG_RUN_SHIFT);
- if ( iommu_en_old && !iommu_en )
+ if ( iommu->reg_ctrl.iommu_en && !newctrl.iommu_en )
guest_iommu_disable(iommu);
- u64_to_reg(&iommu->reg_ctrl, newctrl);
+ iommu->reg_ctrl = newctrl;
+
return 0;
}
val = reg_to_u64(iommu->ppr_log.reg_tail);
break;
case IOMMU_CONTROL_MMIO_OFFSET:
- val = reg_to_u64(iommu->reg_ctrl);
+ val = iommu->reg_ctrl.raw;
break;
case IOMMU_STATUS_MMIO_OFFSET:
val = reg_to_u64(iommu->reg_status);
struct table_struct device_table;
bool_t iommuv2_enabled;
-static int iommu_has_ht_flag(struct amd_iommu *iommu, u8 mask)
+static bool iommu_has_ht_flag(struct amd_iommu *iommu, u8 mask)
{
return iommu->ht_flags & mask;
}
static void set_iommu_ht_flags(struct amd_iommu *iommu)
{
- u32 entry;
- entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
-
/* Setup HT flags */
if ( iommu_has_cap(iommu, PCI_CAP_HT_TUNNEL_SHIFT) )
- iommu_has_ht_flag(iommu, ACPI_IVHD_TT_ENABLE) ?
- iommu_set_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT) :
- iommu_clear_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT);
-
- iommu_has_ht_flag(iommu, ACPI_IVHD_RES_PASS_PW) ?
- iommu_set_bit(&entry, IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT):
- iommu_clear_bit(&entry, IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT);
+ iommu->ctrl.ht_tun_en = iommu_has_ht_flag(iommu, ACPI_IVHD_TT_ENABLE);
- iommu_has_ht_flag(iommu, ACPI_IVHD_ISOC) ?
- iommu_set_bit(&entry, IOMMU_CONTROL_ISOCHRONOUS_SHIFT):
- iommu_clear_bit(&entry, IOMMU_CONTROL_ISOCHRONOUS_SHIFT);
-
- iommu_has_ht_flag(iommu, ACPI_IVHD_PASS_PW) ?
- iommu_set_bit(&entry, IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT):
- iommu_clear_bit(&entry, IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT);
+ iommu->ctrl.pass_pw = iommu_has_ht_flag(iommu, ACPI_IVHD_PASS_PW);
+ iommu->ctrl.res_pass_pw = iommu_has_ht_flag(iommu, ACPI_IVHD_RES_PASS_PW);
+ iommu->ctrl.isoc = iommu_has_ht_flag(iommu, ACPI_IVHD_ISOC);
/* Force coherent */
- iommu_set_bit(&entry, IOMMU_CONTROL_COHERENT_SHIFT);
+ iommu->ctrl.coherent = true;
- writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+ writeq(iommu->ctrl.raw, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
}
static void register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu)
static void set_iommu_translation_control(struct amd_iommu *iommu,
- int enable)
+ bool enable)
{
- u32 entry;
+ iommu->ctrl.iommu_en = enable;
- entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
-
- enable ?
- iommu_set_bit(&entry, IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT) :
- iommu_clear_bit(&entry, IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT);
-
- writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+ writeq(iommu->ctrl.raw, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
}
static void set_iommu_guest_translation_control(struct amd_iommu *iommu,
- int enable)
+ bool enable)
{
- u32 entry;
-
- entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
+ iommu->ctrl.gt_en = enable;
- enable ?
- iommu_set_bit(&entry, IOMMU_CONTROL_GT_ENABLE_SHIFT) :
- iommu_clear_bit(&entry, IOMMU_CONTROL_GT_ENABLE_SHIFT);
-
- writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+ writeq(iommu->ctrl.raw, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
if ( enable )
AMD_IOMMU_DEBUG("Guest Translation Enabled.\n");
}
static void set_iommu_command_buffer_control(struct amd_iommu *iommu,
- int enable)
+ bool enable)
{
- u32 entry;
-
- entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
-
- /*reset head and tail pointer manually before enablement */
+ /* Reset head and tail pointer manually before enablement */
if ( enable )
{
writeq(0, iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
writeq(0, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
-
- iommu_set_bit(&entry, IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT);
}
- else
- iommu_clear_bit(&entry, IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT);
- writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+ iommu->ctrl.cmd_buf_en = enable;
+
+ writeq(iommu->ctrl.raw, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
}
static void register_iommu_exclusion_range(struct amd_iommu *iommu)
}
static void set_iommu_event_log_control(struct amd_iommu *iommu,
- int enable)
+ bool enable)
{
- u32 entry;
-
- entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
-
- /*reset head and tail pointer manually before enablement */
+ /* Reset head and tail pointer manually before enablement */
if ( enable )
{
writeq(0, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
writeq(0, iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET);
-
- iommu_set_bit(&entry, IOMMU_CONTROL_EVENT_LOG_INT_SHIFT);
- iommu_set_bit(&entry, IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT);
- }
- else
- {
- iommu_clear_bit(&entry, IOMMU_CONTROL_EVENT_LOG_INT_SHIFT);
- iommu_clear_bit(&entry, IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT);
}
- iommu_clear_bit(&entry, IOMMU_CONTROL_COMP_WAIT_INT_SHIFT);
+ iommu->ctrl.event_int_en = enable;
+ iommu->ctrl.event_log_en = enable;
+ iommu->ctrl.com_wait_int_en = false;
- writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
+ writeq(iommu->ctrl.raw, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
}
static void set_iommu_ppr_log_control(struct amd_iommu *iommu,
- int enable)
+ bool enable)
{
- u32 entry;
-
- entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
-
- /*reset head and tail pointer manually before enablement */
+ /* Reset head and tail pointer manually before enablement */
if ( enable )
{
writeq(0, iommu->mmio_base + IOMMU_PPR_LOG_HEAD_OFFSET);
writeq(0, iommu->mmio_base + IOMMU_PPR_LOG_TAIL_OFFSET);
-
- iommu_set_bit(&entry, IOMMU_CONTROL_PPR_ENABLE_SHIFT);
- iommu_set_bit(&entry, IOMMU_CONTROL_PPR_LOG_INT_SHIFT);
- iommu_set_bit(&entry, IOMMU_CONTROL_PPR_LOG_ENABLE_SHIFT);
- }
- else
- {
- iommu_clear_bit(&entry, IOMMU_CONTROL_PPR_ENABLE_SHIFT);
- iommu_clear_bit(&entry, IOMMU_CONTROL_PPR_LOG_INT_SHIFT);
- iommu_clear_bit(&entry, IOMMU_CONTROL_PPR_LOG_ENABLE_SHIFT);
}
- writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
+ iommu->ctrl.ppr_en = enable;
+ iommu->ctrl.ppr_int_en = enable;
+ iommu->ctrl.ppr_log_en = enable;
+
+ writeq(iommu->ctrl.raw, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
+
if ( enable )
AMD_IOMMU_DEBUG("PPR Log Enabled.\n");
}
/* reset event log or ppr log when overflow */
static void iommu_reset_log(struct amd_iommu *iommu,
struct ring_buffer *log,
- void (*ctrl_func)(struct amd_iommu *iommu, int))
+ void (*ctrl_func)(struct amd_iommu *iommu, bool))
{
u32 entry;
int log_run, run_bit;
iommu_reset_log(iommu, &iommu->event_log, set_iommu_event_log_control);
else
{
- entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
- if ( !(entry & IOMMU_CONTROL_EVENT_LOG_INT_MASK) )
+ if ( !iommu->ctrl.event_int_en )
{
- entry |= IOMMU_CONTROL_EVENT_LOG_INT_MASK;
- writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
+ iommu->ctrl.event_int_en = true;
+ writeq(iommu->ctrl.raw,
+ iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
/*
* Re-schedule the tasklet to handle eventual log entries added
* between reading the log above and re-enabling the interrupt.
iommu_reset_log(iommu, &iommu->ppr_log, set_iommu_ppr_log_control);
else
{
- entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
- if ( !(entry & IOMMU_CONTROL_PPR_LOG_INT_MASK) )
+ if ( !iommu->ctrl.ppr_int_en )
{
- entry |= IOMMU_CONTROL_PPR_LOG_INT_MASK;
- writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
+ iommu->ctrl.ppr_int_en = true;
+ writeq(iommu->ctrl.raw,
+ iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
/*
* Re-schedule the tasklet to handle eventual log entries added
* between reading the log above and re-enabling the interrupt.
static void iommu_interrupt_handler(int irq, void *dev_id,
struct cpu_user_regs *regs)
{
- u32 entry;
unsigned long flags;
struct amd_iommu *iommu = dev_id;
* Silence interrupts from both event and PPR by clearing the
* enable logging bits in the control register
*/
- entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
- iommu_clear_bit(&entry, IOMMU_CONTROL_EVENT_LOG_INT_SHIFT);
- iommu_clear_bit(&entry, IOMMU_CONTROL_PPR_LOG_INT_SHIFT);
- writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
+ iommu->ctrl.event_int_en = false;
+ iommu->ctrl.ppr_int_en = false;
+ writeq(iommu->ctrl.raw, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
spin_unlock_irqrestore(&iommu->lock, flags);
/* Control Register */
#define IOMMU_CONTROL_MMIO_OFFSET 0x18

/*
 * IOMMU Control Register (MMIO offset 0x18) as a bit field, replacing the
 * old individual MASK/SHIFT #defines.  Field order follows the register's
 * bit layout, bit 0 first; unnamed fields are reserved bits.
 * NOTE(review): layout must match the AMD I/O Virtualization (IOMMU)
 * specification's Control Register definition — verify against the spec
 * revision targeted by this code.
 */
union amd_iommu_control {
    uint64_t raw;
    struct {
        bool iommu_en:1;
        bool ht_tun_en:1;
        bool event_log_en:1;
        bool event_int_en:1;
        bool com_wait_int_en:1;
        unsigned int inv_timeout:3;
        bool pass_pw:1;
        bool res_pass_pw:1;
        bool coherent:1;
        bool isoc:1;
        bool cmd_buf_en:1;
        bool ppr_log_en:1;
        bool ppr_int_en:1;
        bool ppr_en:1;
        bool gt_en:1;
        bool ga_en:1;
        unsigned int crw:4;
        bool smif_en:1;
        bool slf_wb_dis:1;
        bool smif_log_en:1;
        unsigned int gam_en:3;
        bool ga_log_en:1;
        bool ga_int_en:1;
        unsigned int dual_ppr_log_en:2;
        unsigned int dual_event_log_en:2;
        unsigned int dev_tbl_seg_en:3;
        unsigned int priv_abrt_en:2;
        bool ppr_auto_rsp_en:1;
        bool marc_en:1;
        bool blk_stop_mrk_en:1;
        bool ppr_auto_rsp_aon:1;
        bool domain_id_pne:1;
        unsigned int :1;
        bool eph_en:1;
        unsigned int had_update:2;
        bool gd_update_dis:1;
        unsigned int :1;
        bool xt_en:1;
        bool int_cap_xt_en:1;
        bool vcmd_en:1;
        bool viommu_en:1;
        bool ga_update_dis:1;
        bool gappi_en:1;
        unsigned int :8;
    };
};
/* Exclusion Register */
#define IOMMU_EXCLUSION_BASE_LOW_OFFSET 0x20