int type, elf_lookup_fn lookup __unused)
{
- panic("elf_reloc_local");
+ panic("ARM64TODO: elf_reloc_local");
}
/* Process one elf relocation with addend. */
elf_lookup_fn lookup)
{
- panic("elf_reloc");
+ panic("ARM64TODO: elf_reloc");
}
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
- panic("fill_dbregs");
+ panic("ARM64TODO: fill_dbregs");
}
int
set_dbregs(struct thread *td, struct dbreg *regs)
{
- panic("set_dbregs");
+ panic("ARM64TODO: set_dbregs");
}
int
ptrace_set_pc(struct thread *td, u_long addr)
{
- panic("ptrace_set_pc");
+ panic("ARM64TODO: ptrace_set_pc");
return (0);
}
cpu_halt(void)
{
- panic("cpu_halt");
+ panic("ARM64TODO: cpu_halt");
}
/*
cpu_flush_dcache(void *ptr, size_t len)
{
- /* TBD */
+ /* ARM64TODO TBD */
}
/* Get current clock frequency for the given CPU ID. */
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
- panic("cpu_est_clockrate");
+ panic("ARM64TODO: cpu_est_clockrate");
}
void
memrw(struct cdev *dev, struct uio *uio, int flags)
{
- panic("memrw");
+ panic("ARM64TODO: memrw");
}
reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
{
- panic("reclaim_pv_chunk");
+ panic("ARM64TODO: reclaim_pv_chunk");
}
/*
* at all. We need to be able to set it in
* the exception handler.
*/
- panic("TODO: safe_to_clear_referenced\n");
+ panic("ARM64TODO: safe_to_clear_referenced\n");
} else if ((pmap_load(l3) & ATTR_SW_WIRED) == 0) {
/*
* Wired pages cannot be paged out so
if ((m->aflags & PGA_WRITEABLE) == 0)
return;
- /* TODO: We lack support for tracking if a page is modified */
+ /* ARM64TODO: We lack support for tracking if a page is modified */
}
void *
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
- panic("pmap_page_set_memattr");
+ panic("ARM64TODO: pmap_page_set_memattr");
}
/*
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
- panic("pmap_mincore");
+ panic("ARM64TODO: pmap_mincore");
}
void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
- panic("pmap_sync_icache");
+ panic("ARM64TODO: pmap_sync_icache");
}
/*
for (i = 0; i < count; i++) {
paddr = VM_PAGE_TO_PHYS(page[i]);
if (paddr >= DMAP_MAX_PHYSADDR) {
- panic("pmap_unmap_io_transient: TODO: Unmap data");
+ panic("ARM64TODO: pmap_unmap_io_transient: Unmap data");
}
}
}
if (TD_IS_RUNNING(td))
panic("stack_save_td: running");
- stack_zero(st);
+ stack_zero(st); /* ARM64TODO */
}
void
stack_save(struct stack *st)
{
- stack_zero(st);
+ stack_zero(st); /* ARM64TODO */
}
sa->narg = sa->callp->sy_narg;
memcpy(sa->args, ap, nap * sizeof(register_t));
if (sa->narg > nap)
- panic("TODO: Could we have more then 8 args?");
+ panic("ARM64TODO: Could we have more than 8 args?");
td->td_retval[0] = 0;
td->td_retval[1] = 0;
do_el0_error(struct trapframe *frame)
{
- panic("do_el0_error");
+ panic("ARM64TODO: do_el0_error");
}
}
out:
if (__predict_false(mapped)) {
- panic("TODO 3");
+ panic("ARM64TODO: uiomove_fromphys");
pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT], &vaddr, 1,
TRUE);
}
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
{
- panic("uma_small_alloc");
+ panic("ARM64TODO: uma_small_alloc");
}
void
uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
{
- panic("uma_small_free");
+ panic("ARM64TODO: uma_small_free");
}