return !list_empty(&host_its_list);
}
+#define BASER_ATTR_MASK \
+ ((0x3UL << GITS_BASER_SHAREABILITY_SHIFT) | \
+ (0x7UL << GITS_BASER_OUTER_CACHEABILITY_SHIFT) | \
+ (0x7UL << GITS_BASER_INNER_CACHEABILITY_SHIFT))
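+/* The read-only fields: Type (bits 58:56) and Entry_Size (bits 52:48). */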
+#define BASER_RO_MASK (GENMASK(58, 56) | GENMASK(52, 48))
+
+/* Check that the physical address can be encoded in the BASER register. */
+static bool check_baser_phys_addr(void *vaddr, unsigned int page_bits)
+{
+ paddr_t paddr = virt_to_maddr(vaddr);
+
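+ /*
+ * With 64K pages the ITS can address physical bits up to 51 (using the
+ * bits[15:12] encoding below), otherwise only up to bit 47.
+ */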
+ return (!(paddr & ~GENMASK(page_bits < 16 ? 47 : 51, page_bits)));
+}
+
+static uint64_t encode_baser_phys_addr(paddr_t addr, unsigned int page_bits)
+{
+ uint64_t ret = addr & GENMASK(47, page_bits);
+
+ if ( page_bits < 16 )
+ return ret;
+
+ /* For 64K pages address bits 51-48 are encoded in bits 15-12. */
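+ /* E.g. a 64K page at 0x8_8765_0000 is encoded as 0x8765_8000. */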
+ return ret | ((addr & GENMASK(51, 48)) >> (48 - 12));
+}
+
+/* The ITS BASE registers work with page sizes of 4K, 16K or 64K. */
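+/* The 2-bit field value maps 0/1/2 to 12 (4K), 14 (16K) or 16 (64K) bits. */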
+#define BASER_PAGE_BITS(sz) ((sz) * 2 + 12)
+
+static int its_map_baser(void __iomem *basereg, uint64_t regc,
+ unsigned int nr_items)
+{
+ uint64_t attr, reg;
+ unsigned int entry_size = GITS_BASER_ENTRY_SIZE(regc);
+ unsigned int pagesz = 2; /* try 64K pages first, then go down. */
+ unsigned int table_size;
+ void *buffer;
+
+ attr = GIC_BASER_InnerShareable << GITS_BASER_SHAREABILITY_SHIFT;
+ attr |= GIC_BASER_CACHE_SameAsInner << GITS_BASER_OUTER_CACHEABILITY_SHIFT;
+ attr |= GIC_BASER_CACHE_RaWaWb << GITS_BASER_INNER_CACHEABILITY_SHIFT;
+
+ /*
+ * Setup the BASE register with the attributes that we like. Then read
+ * it back and see what sticks (page size, cacheability and shareability
+ * attributes), retrying if necessary.
+ */
+retry:
+ table_size = ROUNDUP(nr_items * entry_size, BIT(BASER_PAGE_BITS(pagesz)));
+ /* The BASE registers support at most 256 pages. */
+ table_size = min(table_size, 256U << BASER_PAGE_BITS(pagesz));
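+ /* With 64K pages this caps a single table at 16MB. */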
+
+ buffer = _xzalloc(table_size, BIT(BASER_PAGE_BITS(pagesz)));
+ if ( !buffer )
+ return -ENOMEM;
+
+ if ( !check_baser_phys_addr(buffer, BASER_PAGE_BITS(pagesz)) )
+ {
+ xfree(buffer);
+ return -ERANGE;
+ }
+
+ reg = attr;
+ reg |= (pagesz << GITS_BASER_PAGE_SIZE_SHIFT);
+ reg |= (table_size >> BASER_PAGE_BITS(pagesz)) - 1;
+ reg |= regc & BASER_RO_MASK;
+ reg |= GITS_VALID_BIT;
+ reg |= encode_baser_phys_addr(virt_to_maddr(buffer),
+ BASER_PAGE_BITS(pagesz));
+
+ writeq_relaxed(reg, basereg);
+ regc = readq_relaxed(basereg);
+
+ /* The host didn't like our attributes, just use what it returned. */
+ if ( (regc & BASER_ATTR_MASK) != attr )
+ {
+ /* If we can't map it shareable, drop cacheability as well. */
+ if ( (regc & GITS_BASER_SHAREABILITY_MASK) == GIC_BASER_NonShareable )
+ {
+ regc &= ~GITS_BASER_INNER_CACHEABILITY_MASK;
+ writeq_relaxed(regc, basereg);
+ }
+ attr = regc & BASER_ATTR_MASK;
+ }
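+
+ /*
+ * If the table is read by the ITS without caching (or as Device
+ * memory), clean our zeroed allocation so the ITS sees it in memory.
+ */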
+ if ( ((regc & GITS_BASER_INNER_CACHEABILITY_MASK) >>
+ GITS_BASER_INNER_CACHEABILITY_SHIFT) <= GIC_BASER_CACHE_nC )
+ clean_and_invalidate_dcache_va_range(buffer, table_size);
+
+ /* If the host accepted our page size, we are done. */
+ if ( ((regc >> GITS_BASER_PAGE_SIZE_SHIFT) & 0x3UL) == pagesz )
+ return 0;
+
+ xfree(buffer);
+
+ if ( pagesz > 0 )
+ {
+ pagesz--;
+ goto retry;
+ }
+
+ /* None of the page sizes was accepted, give up. */
+ return -EINVAL;
+}
+
static int gicv3_its_init_single_its(struct host_its *hw_its)
{
uint64_t reg;
+ int i, ret;
hw_its->its_base = ioremap_nocache(hw_its->addr, hw_its->size);
if ( !hw_its->its_base )
hw_its->evid_bits = GITS_TYPER_EVENT_ID_BITS(reg);
hw_its->itte_size = GITS_TYPER_ITT_SIZE(reg);
+ for ( i = 0; i < GITS_BASER_NR_REGS; i++ )
+ {
+ void __iomem *basereg = hw_its->its_base + GITS_BASER0 + i * 8;
+ unsigned int type;
+
+ reg = readq_relaxed(basereg);
+ type = (reg & GITS_BASER_TYPE_MASK) >> GITS_BASER_TYPE_SHIFT;
+ switch ( type )
+ {
+ case GITS_BASER_TYPE_NONE:
+ continue;
+ case GITS_BASER_TYPE_DEVICE:
+ ret = its_map_baser(basereg, reg, BIT(hw_its->devid_bits));
+ if ( ret )
+ return ret;
+ break;
+ case GITS_BASER_TYPE_COLLECTION:
+ ret = its_map_baser(basereg, reg, num_possible_cpus());
+ if ( ret )
+ return ret;
+ break;
+ /* In case this is a GICv4, provide a (dummy) vPE table as well. */
+ case GITS_BASER_TYPE_VCPU:
+ ret = its_map_baser(basereg, reg, 1);
+ if ( ret )
+ return ret;
+ break;
+ default:
+ continue;
+ }
+ }
+
return 0;
}
#define GITS_BASER7 0x138
/* Register bits */
+#define GITS_VALID_BIT BIT(63)
+
+#define GITS_CTLR_QUIESCENT BIT(31)
+#define GITS_CTLR_ENABLE BIT(0)
+
#define GITS_TYPER_DEVIDS_SHIFT 13
#define GITS_TYPER_DEVIDS_MASK (0x1fUL << GITS_TYPER_DEVIDS_SHIFT)
#define GITS_TYPER_DEVICE_ID_BITS(r) (((r & GITS_TYPER_DEVIDS_MASK) >> \
#define GITS_TYPER_ITT_SIZE(r) ((((r) & GITS_TYPER_ITT_SIZE_MASK) >> \
GITS_TYPER_ITT_SIZE_SHIFT) + 1)
+#define GITS_BASER_INDIRECT BIT(62)
+#define GITS_BASER_INNER_CACHEABILITY_SHIFT 59
+#define GITS_BASER_TYPE_SHIFT 56
+#define GITS_BASER_TYPE_MASK (7ULL << GITS_BASER_TYPE_SHIFT)
+#define GITS_BASER_OUTER_CACHEABILITY_SHIFT 53
+#define GITS_BASER_TYPE_NONE 0UL
+#define GITS_BASER_TYPE_DEVICE 1UL
+#define GITS_BASER_TYPE_VCPU 2UL
+#define GITS_BASER_TYPE_CPU 3UL
+#define GITS_BASER_TYPE_COLLECTION 4UL
+#define GITS_BASER_TYPE_RESERVED5 5UL
+#define GITS_BASER_TYPE_RESERVED6 6UL
+#define GITS_BASER_TYPE_RESERVED7 7UL
+#define GITS_BASER_ENTRY_SIZE_SHIFT 48
+#define GITS_BASER_ENTRY_SIZE(reg) \
+ ((((reg) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+#define GITS_BASER_SHAREABILITY_SHIFT 10
+#define GITS_BASER_PAGE_SIZE_SHIFT 8
+#define GITS_BASER_SHAREABILITY_MASK (0x3ULL << GITS_BASER_SHAREABILITY_SHIFT)
+#define GITS_BASER_OUTER_CACHEABILITY_MASK (0x7ULL << GITS_BASER_OUTER_CACHEABILITY_SHIFT)
+#define GITS_BASER_INNER_CACHEABILITY_MASK (0x7ULL << GITS_BASER_INNER_CACHEABILITY_SHIFT)
+
#include <xen/device_tree.h>
/* data structure for each hardware ITS */