/* extend r/w permissions and keep aggregate */
ivrs_mappings[bdf].write_permission = iw;
ivrs_mappings[bdf].read_permission = ir;
- ivrs_mappings[bdf].unity_map_enable = IOMMU_CONTROL_ENABLED;
+ ivrs_mappings[bdf].unity_map_enable = true;
ivrs_mappings[bdf].addr_range_start = base;
ivrs_mappings[bdf].addr_range_length = length;
}
if ( limit >= iommu_top )
{
reserve_iommu_exclusion_range(iommu, base, limit);
- ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_ENABLED;
- ivrs_mappings[req].dte_allow_exclusion = IOMMU_CONTROL_ENABLED;
+ ivrs_mappings[bdf].dte_allow_exclusion = true;
+ ivrs_mappings[req].dte_allow_exclusion = true;
}
return 0;
for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
{
ivrs_mappings[bdf].dte_requestor_id = bdf;
- ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_DISABLED;
- ivrs_mappings[bdf].unity_map_enable = IOMMU_CONTROL_DISABLED;
- ivrs_mappings[bdf].iommu = NULL;
-
- ivrs_mappings[bdf].intremap_table = NULL;
- ivrs_mappings[bdf].device_flags = 0;
if ( amd_iommu_perdev_intremap )
spin_lock_init(&ivrs_mappings[bdf].intremap_lock);
};
struct ivrs_mappings {
- u16 dte_requestor_id;
- u8 dte_allow_exclusion;
- u8 unity_map_enable;
- u8 write_permission;
- u8 read_permission;
- bool valid;
+ uint16_t dte_requestor_id;
+ bool valid:1;
+ bool dte_allow_exclusion:1;
+ bool unity_map_enable:1;
+ bool write_permission:1;
+ bool read_permission:1;
+
+ /* ivhd device data settings */
+ uint8_t device_flags;
+
unsigned long addr_range_start;
unsigned long addr_range_length;
struct amd_iommu *iommu;
void *intremap_table;
unsigned long *intremap_inuse;
spinlock_t intremap_lock;
-
- /* ivhd device data settings */
- u8 device_flags;
};
extern unsigned int ivrs_bdf_entries;