Most IOMMU hooks are already altcall for performance reasons. Convert the
rest of them so we can harden all the hooks in Control Flow Integrity
configurations. This necessitates the use of iommu_{v,}call() in debug builds
too. Switch to using an ASSERT() as all forms should resolve to &iommu_ops.
Move the root iommu_ops from __read_mostly to __ro_after_init now that the
latter exists.
Since c/s 3330013e6739 ("VT-d / x86: re-arrange cache syncing"), vtd_ops is
not modified and doesn't need a forward declaration, so we can use
__initconst_cf_clobber for both VT-d and AMD.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
extern struct iommu_ops iommu_ops;
-#ifdef NDEBUG
# include <asm/alternative.h>
# define iommu_call(ops, fn, args...) ({ \
- (void)(ops); \
+ ASSERT((ops) == &iommu_ops); \
alternative_call(iommu_ops.fn, ## args); \
})
# define iommu_vcall(ops, fn, args...) ({ \
- (void)(ops); \
+ ASSERT((ops) == &iommu_ops); \
alternative_vcall(iommu_ops.fn, ## args); \
})
-#endif
static inline const struct iommu_ops *iommu_get_ops(void)
{
static inline int iommu_adjust_irq_affinities(void)
{
return iommu_ops.adjust_irq_affinities
- ? iommu_ops.adjust_irq_affinities()
+ ? iommu_call(&iommu_ops, adjust_irq_affinities)
: 0;
}
static inline void iommu_disable_x2apic(void)
{
if ( x2apic_enabled && iommu_ops.disable_x2apic )
- iommu_ops.disable_x2apic();
+ iommu_vcall(&iommu_ops, disable_x2apic);
}
int iommu_identity_mapping(struct domain *d, p2m_access_t p2ma,
hd->arch.amd.paging_mode, 0, 0);
}
-static const struct iommu_ops __initconstrel _iommu_ops = {
+static const struct iommu_ops __initconst_cf_clobber _iommu_ops = {
.init = amd_iommu_domain_init,
.hwdom_init = amd_iommu_hwdom_init,
.quarantine_init = amd_iommu_quarantine_init,
int iommu_suspend()
{
if ( iommu_enabled )
- return iommu_get_ops()->suspend();
+ return iommu_call(iommu_get_ops(), suspend);
return 0;
}
void iommu_resume()
{
if ( iommu_enabled )
- iommu_get_ops()->resume();
+ iommu_vcall(iommu_get_ops(), resume);
}
int iommu_do_domctl(
return;
if ( iommu_enabled )
- iommu_get_ops()->crash_shutdown();
+ iommu_vcall(iommu_get_ops(), crash_shutdown);
+
iommu_enabled = false;
#ifndef iommu_intremap
iommu_intremap = iommu_intremap_off;
static unsigned int __read_mostly nr_iommus;
-static struct iommu_ops vtd_ops;
static struct tasklet vtd_fault_tasklet;
static int cf_check setup_hwdom_device(u8 devfn, struct pci_dev *);
return rc;
}
-static struct iommu_ops __initdata vtd_ops = {
+static const struct iommu_ops __initconst_cf_clobber vtd_ops = {
.init = intel_iommu_domain_init,
.hwdom_init = intel_iommu_hwdom_init,
.quarantine_init = intel_iommu_quarantine_init,
#include <asm/setup.h>
const struct iommu_init_ops *__initdata iommu_init_ops;
-struct iommu_ops __read_mostly iommu_ops;
+struct iommu_ops __ro_after_init iommu_ops;
bool __read_mostly iommu_non_coherent;
enum iommu_intremap __read_mostly iommu_intremap = iommu_intremap_full;
if ( !iommu_ops.enable_x2apic )
return -EOPNOTSUPP;
- return iommu_ops.enable_x2apic();
+ return iommu_call(&iommu_ops, enable_x2apic);
}
void iommu_update_ire_from_apic(