static long privcmd_ioctl(struct file *file,
unsigned int cmd, unsigned long data)
{
- int ret = -ENOSYS;
+ long ret;
void __user *udata = (void __user *) data;
+ unsigned long i, addr, nr, nr_pages;
+ int paged_out;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ LIST_HEAD(pagelist);
+ struct list_head *l, *l2;
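+ /*
+ * ret is now long so that hypercall return values are not truncated,
+ * and the locals shared by the mmap-style ioctls are hoisted out of
+ * the case blocks as unsigned long so page counts cannot overflow int.
+ */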
switch (cmd) {
case IOCTL_PRIVCMD_HYPERCALL: {
privcmd_hypercall_t hypercall;

if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
return -EFAULT;
+ ret = -ENOSYS;
#if defined(__i386__)
if (hypercall.op >= (PAGE_SIZE >> 5))
break;
break;
case IOCTL_PRIVCMD_MMAP: {
-#define MMAP_NR_PER_PAGE (int)((PAGE_SIZE-sizeof(struct list_head))/sizeof(privcmd_mmap_entry_t))
+#define MMAP_NR_PER_PAGE \
+ (unsigned long)((PAGE_SIZE - sizeof(*l)) / sizeof(*msg))
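+ /*
+ * Each page allocated for "pagelist" doubles as a list node: a
+ * struct list_head at the start of the page, followed by as many
+ * privcmd_mmap_entry_t slots as fit in the remainder.
+ */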
privcmd_mmap_t mmapcmd;
privcmd_mmap_entry_t *msg;
privcmd_mmap_entry_t __user *p;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long va;
- int i, rc;
- LIST_HEAD(pagelist);
- struct list_head *l,*l2;
if (!is_initial_xendomain())
return -EPERM;
if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
return -EFAULT;

p = mmapcmd.entry;
for (i = 0; i < mmapcmd.num;) {
- int nr = min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
+ nr = min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
- rc = -ENOMEM;
+ ret = -ENOMEM;
l = (struct list_head *) __get_free_page(GFP_KERNEL);
if (l == NULL)
- goto mmap_out;
+ goto mmap_free;
list_add_tail(l, &pagelist);
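+ /* The entry array starts right after the embedded list_head. */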
msg = (privcmd_mmap_entry_t *)(l + 1);
- rc = -EFAULT;
+ ret = -EFAULT;
if (copy_from_user(msg, p, nr*sizeof(*msg)))
- goto mmap_out;
+ goto mmap_free;
i += nr;
p += nr;
}

l = pagelist.next;
msg = (privcmd_mmap_entry_t *)(l + 1);

down_write(&mm->mmap_sem);
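+ /* All chunks must fall inside a single, previously mmap()ed VMA. */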
vma = find_vma(mm, msg->va);
- rc = -EINVAL;
+ ret = -EINVAL;
if (!vma || (msg->va != vma->vm_start))
goto mmap_out;
- va = vma->vm_start;
+ addr = vma->vm_start;
i = 0;
list_for_each(l, &pagelist) {
- int nr = i + min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
+ nr = i + min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
msg = (privcmd_mmap_entry_t *)(l + 1);
while (i < nr) {
/* Do not allow range to wrap the address space. */
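+ /*
+ * -addr is the number of bytes between addr and the top of the
+ * address space; the LONG_MAX check above keeps the shift from
+ * overflowing.
+ */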
if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
- ((unsigned long)(msg->npages << PAGE_SHIFT) >= -va))
+ (((unsigned long)msg->npages << PAGE_SHIFT) >= -addr))
goto mmap_out;
/* Range chunks must be contiguous in va space. */
- if ((msg->va != va) ||
+ if ((msg->va != addr) ||
((msg->va + (msg->npages << PAGE_SHIFT)) > vma->vm_end))
goto mmap_out;
- va += msg->npages << PAGE_SHIFT;
+ addr += msg->npages << PAGE_SHIFT;
msg++;
i++;
}
}
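+ /*
+ * Refuse to map over a VMA that was already populated; presumably
+ * enforce_singleshot_mapping() marks the range on first use.
+ */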
if (!enforce_singleshot_mapping(vma, vma->vm_start,
- (va - vma->vm_start) >> PAGE_SHIFT))
+ (addr - vma->vm_start) >> PAGE_SHIFT))
goto mmap_out;
- va = vma->vm_start;
+ addr = vma->vm_start;
i = 0;
list_for_each(l, &pagelist) {
- int nr = i + min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
+ nr = i + min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
msg = (privcmd_mmap_entry_t *)(l + 1);
while (i < nr) {
- if ((rc = direct_remap_pfn_range(
+ if ((ret = direct_remap_pfn_range(
vma,
msg->va & PAGE_MASK,
msg->mfn,
msg->npages << PAGE_SHIFT,
vma->vm_page_prot,
mmapcmd.dom)) < 0)
goto mmap_out;
- va += msg->npages << PAGE_SHIFT;
+ addr += msg->npages << PAGE_SHIFT;
msg++;
i++;
}
}
- rc = 0;
+ ret = 0;
mmap_out:
up_write(&mm->mmap_sem);
+ /* Failures before down_write() skip the unlock. */
+ mmap_free:
list_for_each_safe(l, l2, &pagelist)
free_page((unsigned long)l);
- ret = rc;
}
#undef MMAP_NR_PER_PAGE
break;
case IOCTL_PRIVCMD_MMAPBATCH: {
-#define MMAPBATCH_NR_PER_PAGE (unsigned long)((PAGE_SIZE-sizeof(struct list_head))/sizeof(unsigned long))
+#define MMAPBATCH_NR_PER_PAGE \
+ (unsigned long)((PAGE_SIZE - sizeof(*l)) / sizeof(*mfn))
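+ /* Same layout as above: a list_head header, then xen_pfn_t slots. */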
privcmd_mmapbatch_t m;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
xen_pfn_t __user *p;
- unsigned long addr, *mfn, nr_pages;
- int i;
- LIST_HEAD(pagelist);
- struct list_head *l, *l2;
- int paged_out = 0;
+ xen_pfn_t *mfn;
if (!is_initial_xendomain())
return -EPERM;
if (copy_from_user(&m, udata, sizeof(m)))
return -EFAULT;

nr_pages = m.num;
p = m.arr;
for (i = 0; i < nr_pages; ) {
- int nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
+ nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
ret = -ENOMEM;
l = (struct list_head *)__get_free_page(GFP_KERNEL);
i = 0;
ret = 0;
+ paged_out = 0;
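+ /* Remap each chunk; note any frames the hypervisor reports paged out. */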
list_for_each(l, &pagelist) {
- int nr = i + min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
- int rc;
-
+ nr = i + min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
- mfn = (unsigned long *)(l + 1);
+ mfn = (xen_pfn_t *)(l + 1);
while (i < nr) {
+ int rc;
+
rc = direct_remap_pfn_range(vma, addr & PAGE_MASK,
*mfn, PAGE_SIZE,
vma->vm_page_prot, m.dom);
else
ret = 0;
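+ /* Copy the (possibly updated) frame array back to the caller. */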
list_for_each(l, &pagelist) {
- int nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
+ nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
- mfn = (unsigned long *)(l + 1);
+ mfn = (xen_pfn_t *)(l + 1);
if (copy_to_user(p, mfn, nr*sizeof(*mfn)))
ret = -EFAULT;
case IOCTL_PRIVCMD_MMAPBATCH_V2: {
privcmd_mmapbatch_v2_t m;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
const xen_pfn_t __user *p;
xen_pfn_t *mfn;
- unsigned long addr, nr_pages;
- unsigned int i, nr;
- LIST_HEAD(pagelist);
- struct list_head *l, *l2;
- int *err, paged_out;
+ int *err;
if (!is_initial_xendomain())
return -EPERM;
ret = 0;
paged_out = 0;
list_for_each(l, &pagelist) {
- int rc;
-
nr = i + min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
mfn = (void *)(l + 1);
err = (void *)(l + 1);
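+ /*
+ * err reuses the mfn page in place; the BUILD_BUG_ON below ensures
+ * an error slot never outgrows the xen_pfn_t slot it overwrites.
+ */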
BUILD_BUG_ON(sizeof(*err) > sizeof(*mfn));
while (i < nr) {
+ int rc;
+
rc = direct_remap_pfn_range(vma, addr & PAGE_MASK,
*mfn, PAGE_SIZE,
vma->vm_page_prot, m.dom);