};
UK_TAILQ_HEAD(pf_device_list, struct pf_device);
+#if CONFIG_PAGING
+/**
+ * (Re)map device memory
+ *
+ * Maps a region with attributes suitable for device memory access.
+ * If the region is already mapped, it will be remapped unconditionally.
+ *
+ * @param base Physical base address of the region to map.
+ * @param size Region size. Rounded up to page granularity internally.
+ * @return Virtual address of the mapping on success, a negative error
+ *   value encoded via ERR2PTR() otherwise (check with PTRISERR()).
+ */
+__vaddr_t uk_bus_pf_devmap(__paddr_t base, __sz size);
+#endif /* CONFIG_PAGING */
#define PF_REGISTER_DRIVER(b) \
_PF_REGISTER_DRIVER(__LIBNAME__, b)
#include <uk/bus/platform.h>
#include <uk/plat/common/bootinfo.h>
+#if CONFIG_PAGING
+#include <uk/errptr.h>
+#include <uk/plat/paging.h>
+#endif /* CONFIG_PAGING */
+
#if CONFIG_LIBUKBUS_PLATFORM_FDT
#include <libfdt.h>
#include <uk/ofw/fdt.h>
return 0;
}
+#if CONFIG_PAGING
+/* (Re)map a physical region 1:1 with device-memory attributes.
+ * Returns the (identity-mapped) virtual address of base on success,
+ * or an error pointer encoded via ERR2PTR() on failure.
+ */
+__vaddr_t uk_bus_pf_devmap(__paddr_t base, __sz size)
+{
+	struct uk_pagetable *pt;
+	__vaddr_t vaddr;
+	__paddr_t paddr;
+	unsigned long attr;
+	unsigned long pages;
+	int rc;
+
+	attr = PAGE_ATTR_PROT_RW;
+#ifdef CONFIG_ARCH_ARM_64
+	/* Device memory on arm64: non-gathering, non-reordering,
+	 * no early write acknowledgement.
+	 */
+	attr |= PAGE_ATTR_TYPE_DEVICE_nGnRnE;
+#endif /* CONFIG_ARCH_ARM_64 */
+
+	paddr = ALIGN_DOWN(base, __PAGE_SIZE);
+	vaddr = (__vaddr_t)paddr;
+
+	/* Compute the page count over the full aligned span. Using
+	 * ALIGN_UP(size) alone would undercount by one page when an
+	 * unaligned base pushes [base, base + size) across an extra
+	 * page boundary.
+	 */
+	pages = (ALIGN_UP(base + size, __PAGE_SIZE) - paddr) >> PAGE_SHIFT;
+
+	pt = ukplat_pt_get_active();
+
+	/* We can't use uk_vma_map_dma here as the vmem API is initialized
+	 * way too late for the interrupt controller which is required for
+	 * bringing up secondary cores.
+	 */
+	rc = ukplat_page_map(pt, vaddr, paddr, pages, attr, 0);
+	if (!rc)
+		goto out;
+
+	/* Already mapped: remap unconditionally by updating the
+	 * attributes of the existing mapping.
+	 */
+	if (rc == -EEXIST)
+		rc = ukplat_page_set_attr(pt, vaddr, pages, attr, 0);
+
+	if (unlikely(rc))
+		return (__vaddr_t)ERR2PTR(rc);
+out:
+	/* 1:1 mapping: the virtual address equals the physical base */
+	return (__vaddr_t)base;
+}
+#endif /* CONFIG_PAGING */
+
void _pf_register_driver(struct pf_driver *drv)
{
UK_ASSERT(drv != NULL);