ret = sys_ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, (unsigned long)p);
+#ifdef xen_pfn32_t
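+ /* Copy the (possibly error-tagged) frame numbers back to the compat
+  * caller's 32-bit array; a value that no longer fits in xen_pfn32_t
+  * is reported as -ERANGE. */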
+ for (i = 0; !ret && i < n32.num; ++i) {
+ xen_pfn_t mfn;
+
+ if (get_user(mfn, arr + i) || put_user(mfn, arr32 + i))
+ ret = -EFAULT;
+ else if (mfn != (xen_pfn32_t)mfn)
+ ret = -ERANGE;
+ }
+#endif
+ }
+ break;
+ case IOCTL_PRIVCMD_MMAPBATCH_V2_32: {
+ struct privcmd_mmapbatch_v2 *p;
+ struct privcmd_mmapbatch_v2_32 *p32;
+ struct privcmd_mmapbatch_v2_32 n32;
+#ifdef xen_pfn32_t
+ xen_pfn_t __user *arr;
+ xen_pfn32_t __user *arr32;
+ unsigned int i;
+#endif
+
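+ /* Rebuild the caller's 32-bit structure as a native
+  * privcmd_mmapbatch_v2 in compat user-space scratch memory. */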
+ p32 = compat_ptr(arg);
+ p = compat_alloc_user_space(sizeof(*p));
+ if (copy_from_user(&n32, p32, sizeof(n32)) ||
+ put_user(n32.num, &p->num) ||
+ put_user(n32.dom, &p->dom) ||
+ put_user(n32.addr, &p->addr) ||
+ put_user(compat_ptr(n32.err), &p->err))
+ return -EFAULT;
+#ifdef xen_pfn32_t
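+ /* Reserve the widened pfn array just below *p in the same scratch
+  * area (hence the extra sizeof(*p)) and expand each 32-bit frame
+  * number into it. */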
+ arr = compat_alloc_user_space(n32.num * sizeof(*arr)
+ + sizeof(*p));
+ arr32 = compat_ptr(n32.arr);
+ for (i = 0; i < n32.num; ++i) {
+ xen_pfn32_t mfn;
+
+ if (get_user(mfn, arr32 + i) || put_user(mfn, arr + i))
+ return -EFAULT;
+ }
+
+ if (put_user(arr, &p->arr))
+ return -EFAULT;
+#else
+ if (put_user(compat_ptr(n32.arr), &p->arr))
+ return -EFAULT;
+#endif
+
+ ret = sys_ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, (unsigned long)p);
+
#ifdef xen_pfn32_t
for (i = 0; !ret && i < n32.num; ++i) {
xen_pfn_t mfn;
mmapbatch_out:
list_for_each_safe(l,l2,&pagelist)
free_page((unsigned long)l);
+ }
+ break;
+
+ case IOCTL_PRIVCMD_MMAPBATCH_V2: {
+ privcmd_mmapbatch_v2_t m;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ const xen_pfn_t __user *p;
+ xen_pfn_t *mfn;
+ unsigned long addr, nr_pages;
+ unsigned int i, nr;
+ LIST_HEAD(pagelist);
+ struct list_head *l, *l2;
+ int *err, paged_out;
+
+ if (!is_initial_xendomain())
+ return -EPERM;
+
+ if (copy_from_user(&m, udata, sizeof(m)))
+ return -EFAULT;
+
+ nr_pages = m.num;
+ addr = m.addr;
+ if (m.num <= 0 || nr_pages > (ULONG_MAX >> PAGE_SHIFT) ||
+ addr != m.addr || nr_pages > (-addr >> PAGE_SHIFT))
+ return -EINVAL;
+
+ p = m.arr;
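+ /* Copy the frame numbers in page-sized chunks, each stored behind a
+  * list_head at the start of its page and linked onto pagelist. */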
+ for (i = 0; i < nr_pages; i += nr, p += nr) {
+ nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
+
+ ret = -ENOMEM;
+ l = (struct list_head *)__get_free_page(GFP_KERNEL);
+ if (l == NULL)
+ goto mmapbatch_v2_out;
+
+ INIT_LIST_HEAD(l);
+ list_add_tail(l, &pagelist);
+
+ mfn = (void *)(l + 1);
+ ret = -EFAULT;
+ if (copy_from_user(mfn, p, nr * sizeof(*mfn)))
+ goto mmapbatch_v2_out;
+ }
+
+ down_write(&mm->mmap_sem);
+
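+ /* The whole range must fall inside a single VMA and pass the
+  * single-shot mapping check. */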
+ vma = find_vma(mm, addr);
+ ret = -EINVAL;
+ if (!vma ||
+ addr < vma->vm_start ||
+ addr + (nr_pages << PAGE_SHIFT) > vma->vm_end ||
+ !enforce_singleshot_mapping(vma, addr, nr_pages)) {
+ up_write(&mm->mmap_sem);
+ goto mmapbatch_v2_out;
+ }
+
+ i = 0;
+ ret = 0;
+ paged_out = 0;
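+ /* Map each chunk.  Per-frame status codes are stored back over the
+  * consumed frame numbers in the same buffer (safe because an int is
+  * no wider than a xen_pfn_t, see the BUILD_BUG_ON), and ret counts
+  * the frames that failed to map. */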
+ list_for_each(l, &pagelist) {
+ int rc;
+
+ nr = i + min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
+ mfn = (void *)(l + 1);
+ err = (void *)(l + 1);
+ BUILD_BUG_ON(sizeof(*err) > sizeof(*mfn));
+
+ while (i < nr) {
+ rc = direct_remap_pfn_range(vma, addr & PAGE_MASK,
+ *mfn, PAGE_SIZE,
+ vma->vm_page_prot, m.dom);
+ if (rc < 0) {
+ if (rc == -ENOENT)
+ paged_out = 1;
+ ret++;
+ } else
+ BUG_ON(rc > 0);
+ *err++ = rc;
+ mfn++; i++; addr += PAGE_SIZE;
+ }
+ }
+
+ up_write(&mm->mmap_sem);
+
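+ /* Hand the per-frame status codes back through the caller's err
+  * array; if any frame was paged out, report -ENOENT overall. */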
+ if (ret > 0) {
+ int __user *p = m.err;
+
+ ret = paged_out ? -ENOENT : 0;
+ i = 0;
+ list_for_each(l, &pagelist) {
+ nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
+ err = (void *)(l + 1);
+ if (copy_to_user(p, err, nr * sizeof(*err)))
+ ret = -EFAULT;
+ i += nr; p += nr;
+ }
+ }
+
+ mmapbatch_v2_out:
+ list_for_each_safe(l, l2, &pagelist)
+ free_page((unsigned long)l);
#undef MMAPBATCH_NR_PER_PAGE
}
break;
#ifdef CONFIG_XEN
HANDLE_IOCTL(IOCTL_PRIVCMD_MMAP_32, privcmd_ioctl_32)
HANDLE_IOCTL(IOCTL_PRIVCMD_MMAPBATCH_32, privcmd_ioctl_32)
+HANDLE_IOCTL(IOCTL_PRIVCMD_MMAPBATCH_V2_32, privcmd_ioctl_32)
COMPATIBLE_IOCTL(IOCTL_PRIVCMD_HYPERCALL)
COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_VIRQ)
COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_INTERDOMAIN)
#endif
compat_uptr_t arr; /* array of mfns - top nibble set on err */
};
+
+struct privcmd_mmapbatch_v2_32 {
+ unsigned int num; /* number of pages to populate */
+ domid_t dom; /* target domain */
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+ union { /* virtual address */
+ __u64 addr __attribute__((packed));
+ __u32 va; /* ensures union is 4-byte aligned */
+ };
+#else
+ __u64 addr; /* virtual address */
+#endif
+ compat_uptr_t arr; /* array of mfns */
+ compat_uptr_t err; /* array of error codes */
+};
+
#define IOCTL_PRIVCMD_MMAP_32 \
_IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap_32))
-#define IOCTL_PRIVCMD_MMAPBATCH_32 \
+#define IOCTL_PRIVCMD_MMAPBATCH_32 \
_IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch_32))
+#define IOCTL_PRIVCMD_MMAPBATCH_V2_32 \
+ _IOC(_IOC_NONE, 'P', 4, sizeof(struct privcmd_mmapbatch_v2_32))
#endif /* __LINUX_XEN_COMPAT_H__ */
xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */
} privcmd_mmapbatch_t;
+typedef struct privcmd_mmapbatch_v2 {
+ unsigned int num; /* number of pages to populate */
+ domid_t dom; /* target domain */
+ __u64 addr; /* virtual address */
+ const xen_pfn_t __user *arr; /* array of mfns */
+ int __user *err; /* array of error codes */
+} privcmd_mmapbatch_v2_t;
+
/*
* @cmd: IOCTL_PRIVCMD_HYPERCALL
* @arg: &privcmd_hypercall_t
_IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
#define IOCTL_PRIVCMD_MMAPBATCH \
_IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
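+/*
+ * @cmd: IOCTL_PRIVCMD_MMAPBATCH_V2
+ * @arg: &privcmd_mmapbatch_v2_t
+ * Return: 0 on success; per-frame mapping failures are reported through
+ * @err, and -ENOENT is returned if any requested frame was paged out.
+ */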
+#define IOCTL_PRIVCMD_MMAPBATCH_V2 \
+ _IOC(_IOC_NONE, 'P', 4, sizeof(privcmd_mmapbatch_v2_t))
#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */