};
struct ring_buffer {
+ spinlock_t lock; /* protect buffer pointers */
void *buffer;
- unsigned long entries;
- unsigned long alloc_size;
uint32_t tail;
uint32_t head;
- spinlock_t lock; /* protect buffer pointers */
+ uint32_t size;
};
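With entries and alloc_size folded into a single byte count, head and tail are now plain byte offsets into the buffer, matching what the hardware's ring pointer registers hold. A minimal sketch of the resulting arithmetic (helper name hypothetical, not part of the patch):

    #include <stdint.h>

    /* Advance a byte offset by one entry, wrapping at the buffer size.
     * size is always a whole multiple of entry_size because buffers are
     * allocated in power-of-two page units (see allocate_ring_buffer). */
    static inline uint32_t rb_advance(uint32_t off, uint32_t entry_size,
                                      uint32_t size)
    {
        off += entry_size;
        return off == size ? 0 : off;
    }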
typedef struct iommu_cap {
return !!(iommu->cap.header & (1u << bit));
}
-/* access tail or head pointer of ring buffer */
-static inline uint32_t iommu_get_rb_pointer(uint32_t reg)
-{
- return get_field_from_reg_u32(reg, IOMMU_RING_BUFFER_PTR_MASK,
- IOMMU_RING_BUFFER_PTR_SHIFT);
-}
-
-static inline void iommu_set_rb_pointer(uint32_t *reg, uint32_t val)
-{
- set_field_in_reg_u32(val, *reg, IOMMU_RING_BUFFER_PTR_MASK,
- IOMMU_RING_BUFFER_PTR_SHIFT, reg);
-}
-
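The dropped helpers existed to shift the hardware pointer field down to an entry index and back up again. With tail/head kept as byte offsets, that round trip disappears: the register already holds the byte offset (entries are 16 bytes, so the field's low bits are zero), and a read only needs the reserved bits masked off. An illustration with assumed field placement (bits 18:4, my reading of the AMD IOMMU spec, not taken from this patch):

    #include <stdint.h>

    #define RB_PTR_MASK  0x0007fff0u   /* assumed: pointer in bits 18:4 */
    #define RB_PTR_SHIFT 4

    static uint32_t old_style(uint32_t reg)  /* entry index; needs *16 later */
    {
        return (reg & RB_PTR_MASK) >> RB_PTR_SHIFT;
    }

    static uint32_t new_style(uint32_t reg)  /* byte offset; used directly */
    {
        return reg & RB_PTR_MASK;
    }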
/* access device id field from iommu cmd */
static inline uint16_t iommu_get_devid_from_cmd(uint32_t cmd)
{
{
uint32_t tail, head;
- tail = iommu->cmd_buffer.tail;
- if ( ++tail == iommu->cmd_buffer.entries )
+ tail = iommu->cmd_buffer.tail + IOMMU_CMD_BUFFER_ENTRY_SIZE;
+ if ( tail == iommu->cmd_buffer.size )
tail = 0;
- head = iommu_get_rb_pointer(readl(iommu->mmio_base +
- IOMMU_CMD_BUFFER_HEAD_OFFSET));
+ head = readl(iommu->mmio_base +
+ IOMMU_CMD_BUFFER_HEAD_OFFSET) & IOMMU_RING_BUFFER_PTR_MASK;
if ( head != tail )
{
- memcpy(iommu->cmd_buffer.buffer +
- (iommu->cmd_buffer.tail * IOMMU_CMD_BUFFER_ENTRY_SIZE),
+ memcpy(iommu->cmd_buffer.buffer + iommu->cmd_buffer.tail,
cmd, IOMMU_CMD_BUFFER_ENTRY_SIZE);
iommu->cmd_buffer.tail = tail;
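The full check keeps the classic ring-buffer convention: the tail is advanced first, and the command is rejected if it would land on the head, so one entry slot always stays unused to tell a full ring from an empty one. In byte terms (illustration only, not code from the patch):

    /* Usable capacity of the command ring, in entries. */
    static uint32_t cmd_ring_capacity(const struct ring_buffer *rb)
    {
        return rb->size / IOMMU_CMD_BUFFER_ENTRY_SIZE - 1;
    }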
static void commit_iommu_command_buffer(struct amd_iommu *iommu)
{
- u32 tail = 0;
-
- iommu_set_rb_pointer(&tail, iommu->cmd_buffer.tail);
- writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET);
+ writel(iommu->cmd_buffer.tail,
+ iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
}
-int send_iommu_command(struct amd_iommu *iommu, u32 cmd[])
+static int send_iommu_command(struct amd_iommu *iommu, u32 cmd[])
{
if ( queue_iommu_command(iommu, cmd) )
{
send_iommu_command(iommu, cmd);
}
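Only fragments of send_iommu_command are visible above; judging from the call it presumably keeps the queue-then-commit shape, publishing the new tail only after the entry has been copied in. A sketch of that shape (an assumption reconstructed from the visible pieces, not the actual body):

    static int send_iommu_command(struct amd_iommu *iommu, u32 cmd[])
    {
        if ( queue_iommu_command(iommu, cmd) )
        {
            commit_iommu_command_buffer(iommu);  /* doorbell: writel(tail) */
            return 1;
        }
        return 0;  /* ring full; caller decides how to recover */
    }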
-void invalidate_iommu_all(struct amd_iommu *iommu)
+static void invalidate_iommu_all(struct amd_iommu *iommu)
{
u32 cmd[4], entry;
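INVALIDATE_IOMMU_ALL is among the simplest commands to encode: by my reading of the AMD IOMMU specification the opcode (0x8) occupies bits 63:60 of the 128-bit command and every other field is zero, so the body built here should boil down to something like (spec-derived illustration, not code from the patch):

    u32 cmd[4] = { 0, 0x8u << 28, 0, 0 };  /* opcode in bits 31:28 of dword 1 */
    send_iommu_command(iommu, cmd);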
iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT);
writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET);
- power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) +
+ power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.size) +
IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE;
entry = 0;
iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT);
writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_LOW_OFFSET);
- power_of2_entries = get_order_from_bytes(iommu->event_log.alloc_size) +
+ power_of2_entries = get_order_from_bytes(iommu->event_log.size) +
IOMMU_EVENT_LOG_POWER_OF2_ENTRIES_PER_PAGE;
entry = 0;
iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT);
writel(entry, iommu->mmio_base + IOMMU_PPR_LOG_BASE_LOW_OFFSET);
- power_of2_entries = get_order_from_bytes(iommu->ppr_log.alloc_size) +
+ power_of2_entries = get_order_from_bytes(iommu->ppr_log.size) +
IOMMU_PPR_LOG_POWER_OF2_ENTRIES_PER_PAGE;
entry = 0;
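The length field the hardware wants is log2 of the entry count; splitting it into page order plus per-page entry order works because each buffer is a whole power-of-two number of pages. A worked example under assumed sizes:

    /* Assumed: 32 KiB command buffer, 16-byte entries, 4 KiB pages.
     *   get_order_from_bytes(32768)                  = 3  (2^3 pages)
     *   IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE  = 8  (2^8 = 256 per page)
     *   power_of2_entries = 3 + 8 = 11  ->  2^11 = 2048 entries
     *   check: 32768 bytes / 16 bytes per entry      = 2048
     */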
unsigned int entry_size,
void (*parse_func)(struct amd_iommu *, u32 *))
{
- u32 tail, head, *entry, tail_offest, head_offset;
+ u32 tail, *entry, tail_offest, head_offset;
BUG_ON(!iommu || ((log != &iommu->event_log) && (log != &iommu->ppr_log)));
IOMMU_EVENT_LOG_HEAD_OFFSET :
IOMMU_PPR_LOG_HEAD_OFFSET;
- tail = readl(iommu->mmio_base + tail_offest);
- tail = iommu_get_rb_pointer(tail);
+ tail = readl(iommu->mmio_base + tail_offest) & IOMMU_RING_BUFFER_PTR_MASK;
while ( tail != log->head )
{
/* read one log entry (event or PPR) */
- entry = (u32 *)(log->buffer + log->head * entry_size);
+ entry = log->buffer + log->head;
parse_func(iommu, entry);
- if ( ++log->head == log->entries )
+
+ log->head += entry_size;
+ if ( log->head == log->size )
log->head = 0;
/* update head pointer */
- head = 0;
- iommu_set_rb_pointer(&head, log->head);
-
- writel(head, iommu->mmio_base + head_offset);
+ writel(log->head, iommu->mmio_base + head_offset);
}
spin_unlock(&log->lock);
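Stripped of the register plumbing, the loop above is a plain single-consumer drain: compare the software head with the hardware tail, hand each fixed-size entry to parse_func, and acknowledge progress by writing the head back. A distilled sketch (names and signature hypothetical):

    static void drain_log(void *buf, uint32_t *head, uint32_t tail,
                          uint32_t entry_size, uint32_t size,
                          void (*handle)(uint32_t *))
    {
        while ( tail != *head )
        {
            handle(buf + *head);   /* process one entry in place */
            *head += entry_size;   /* advance in bytes, wrapping at size */
            if ( *head == size )
                *head = 0;
        }
    }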
static void __init deallocate_ring_buffer(struct ring_buffer *ring_buf)
{
- deallocate_buffer(ring_buf->buffer, ring_buf->alloc_size);
+ deallocate_buffer(ring_buf->buffer, ring_buf->size);
ring_buf->buffer = NULL;
ring_buf->head = 0;
ring_buf->tail = 0;
ring_buf->tail = 0;
spin_lock_init(&ring_buf->lock);
-
- ring_buf->alloc_size = PAGE_SIZE << get_order_from_bytes(entries *
- entry_size);
- ring_buf->entries = ring_buf->alloc_size / entry_size;
- ring_buf->buffer = allocate_buffer(ring_buf->alloc_size, name, clear);
+
+ ring_buf->size = PAGE_SIZE << get_order_from_bytes(entries * entry_size);
+ ring_buf->buffer = allocate_buffer(ring_buf->size, name, clear);
return ring_buf->buffer;
}
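Note the rounding behaviour: the allocation is a power-of-two number of pages, so the usable size can exceed entries * entry_size; with the old entries field gone, the code now tracks exactly the rounded byte size the hardware is programmed with. For example (assumed figures):

    /* 300 requested entries x 16 bytes = 4800 bytes
     *   get_order_from_bytes(4800) = 1  ->  size = PAGE_SIZE << 1 = 8192
     * i.e. room for 512 entries rather than 300.  A 512-entry request
     * (8192 bytes) maps to the same order and wastes nothing. */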