ia64/xen-unstable

linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c @ 7873:4c0606fb701b

Fix IOCTL_PRIVCMD_GET_MACH2PHYS_MFNS to work properly for
4MB superpages (32-bit non-pae).

Signed-off-by: Keir Fraser <keir@xensource.com>
author    kaf24@firebug.cl.cam.ac.uk
date      Thu Nov 17 11:47:49 2005 +0100
parents   fe3a892b33b4
children  419b32f72179
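
For orientation, the ioctl this changeset fixes is reached from dom0 user
space through the privcmd device node. Below is a minimal caller sketch,
not part of the changeset: it assumes the historical /proc/xen/privcmd
node, a local copy of the privcmd.h ioctl definitions, and that
privcmd_m2pmfns_t carries the num/arr fields the handler reads; the entry
count of 4 is purely illustrative.

/* Hypothetical user-space sketch (illustration only, not from this tree). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "privcmd.h"    /* assumed copy of asm-xen/linux-public/privcmd.h */

int main(void)
{
        unsigned long mfns[4];          /* illustrative entry count */
        privcmd_m2pmfns_t req = { .num = 4, .arr = mfns };
        int fd, i;

        fd = open("/proc/xen/privcmd", O_RDONLY);   /* assumed device path */
        if (fd < 0) {
                perror("open /proc/xen/privcmd");
                return 1;
        }

        if (ioctl(fd, IOCTL_PRIVCMD_GET_MACH2PHYS_MFNS, &req) < 0) {
                perror("IOCTL_PRIVCMD_GET_MACH2PHYS_MFNS");
                close(fd);
                return 1;
        }

        for (i = 0; i < 4; i++)
                printf("m2p chunk %d -> mfn %#lx\n", i, mfns[i]);

        close(fd);
        return 0;
}
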
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/kthread.h>
#include <asm/hypervisor.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/hypervisor.h>
#include <asm-xen/linux-public/privcmd.h>
#include <asm-xen/xen-public/xen.h>
#include <asm-xen/xen-public/dom0_ops.h>
#include <asm-xen/xen_proc.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#define pud_t pgd_t
#define pud_offset(d, va) d
#endif

static struct proc_dir_entry *privcmd_intf;

static int privcmd_ioctl(struct inode *inode, struct file *file,
                         unsigned int cmd, unsigned long data)
{
        int ret = -ENOSYS;

        switch (cmd) {
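        /*
         * Forward an arbitrary hypercall from user space: the per-arch
         * assembly below loads the hypercall number and up to five
         * arguments into the calling-convention registers, traps into
         * Xen, and returns the hypervisor's result in ret.
         */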
        case IOCTL_PRIVCMD_HYPERCALL: {
                privcmd_hypercall_t hypercall;

                if (copy_from_user(&hypercall, (void *)data,
                                   sizeof(hypercall)))
                        return -EFAULT;

#if defined(__i386__)
                __asm__ __volatile__ (
                        "pushl %%ebx; pushl %%ecx; pushl %%edx; "
                        "pushl %%esi; pushl %%edi; "
                        "movl 4(%%eax),%%ebx ;"
                        "movl 8(%%eax),%%ecx ;"
                        "movl 12(%%eax),%%edx ;"
                        "movl 16(%%eax),%%esi ;"
                        "movl 20(%%eax),%%edi ;"
                        "movl (%%eax),%%eax ;"
                        TRAP_INSTR "; "
                        "popl %%edi; popl %%esi; popl %%edx; "
                        "popl %%ecx; popl %%ebx"
                        : "=a" (ret) : "0" (&hypercall) : "memory" );
#elif defined (__x86_64__)
                {
                        long ign1, ign2, ign3;
                        __asm__ __volatile__ (
                                "movq %8,%%r10; movq %9,%%r8;" TRAP_INSTR
                                : "=a" (ret), "=D" (ign1),
                                  "=S" (ign2), "=d" (ign3)
                                : "0" ((unsigned long)hypercall.op),
                                  "1" ((unsigned long)hypercall.arg[0]),
                                  "2" ((unsigned long)hypercall.arg[1]),
                                  "3" ((unsigned long)hypercall.arg[2]),
                                  "g" ((unsigned long)hypercall.arg[3]),
                                  "g" ((unsigned long)hypercall.arg[4])
                                : "r11","rcx","r8","r10","memory");
                }
#elif defined (__ia64__)
                __asm__ __volatile__ (
                        ";; mov r14=%2; mov r15=%3; "
                        "mov r16=%4; mov r17=%5; mov r18=%6;"
                        "mov r2=%1; break 0x1000;; mov %0=r8 ;;"
                        : "=r" (ret)
                        : "r" (hypercall.op),
                          "r" (hypercall.arg[0]),
                          "r" (hypercall.arg[1]),
                          "r" (hypercall.arg[2]),
                          "r" (hypercall.arg[3]),
                          "r" (hypercall.arg[4])
                        : "r14","r15","r16","r17","r18","r2","r8","memory");
#endif
        }
        break;

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
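        /*
         * Map a caller-supplied list of machine frame ranges into the
         * calling process.  Each privcmd_mmap_entry_t names a user VA,
         * a starting MFN and a page count; the range must lie inside an
         * existing VMA below PAGE_OFFSET.  Entries are copied in and
         * handled PRIVCMD_MMAP_SZ at a time.
         */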
        case IOCTL_PRIVCMD_MMAP: {
#define PRIVCMD_MMAP_SZ 32
                privcmd_mmap_t mmapcmd;
                privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
                int i, rc;

                if (copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)))
                        return -EFAULT;

                p = mmapcmd.entry;

                for (i = 0; i < mmapcmd.num;
                     i += PRIVCMD_MMAP_SZ, p += PRIVCMD_MMAP_SZ) {
                        int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
                                PRIVCMD_MMAP_SZ:(mmapcmd.num-i);

                        if (copy_from_user(&msg, p,
                                           n*sizeof(privcmd_mmap_entry_t)))
                                return -EFAULT;

                        for (j = 0; j < n; j++) {
                                struct vm_area_struct *vma =
                                        find_vma( current->mm, msg[j].va );

                                if (!vma)
                                        return -EINVAL;

                                if (msg[j].va > PAGE_OFFSET)
                                        return -EINVAL;

                                if ((msg[j].va + (msg[j].npages << PAGE_SHIFT))
                                    > vma->vm_end )
                                        return -EINVAL;

                                if ((rc = direct_remap_pfn_range(
                                        vma,
                                        msg[j].va&PAGE_MASK,
                                        msg[j].mfn,
                                        msg[j].npages<<PAGE_SHIFT,
                                        vma->vm_page_prot,
                                        mmapcmd.dom)) < 0)
                                        return rc;
                        }
                }
                ret = 0;
        }
        break;
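
        /*
         * Like IOCTL_PRIVCMD_MMAP, but with one MFN supplied per page of
         * the target range: each frame is installed with an explicit
         * mmu_update against the foreign domain (plain remap_pfn_range on
         * ia64), and any frame Xen rejects is reported back to user space
         * by or'ing 0xF0000000 into its slot in the array.
         */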
        case IOCTL_PRIVCMD_MMAPBATCH: {
                mmu_update_t u;
                privcmd_mmapbatch_t m;
                struct vm_area_struct *vma = NULL;
                unsigned long *p, addr;
                unsigned long mfn;
                uint64_t ptep;
                int i;

                if (copy_from_user(&m, (void *)data, sizeof(m))) {
                        ret = -EFAULT;
                        goto batch_err;
                }

                vma = find_vma( current->mm, m.addr );
                if (!vma) {
                        ret = -EINVAL;
                        goto batch_err;
                }

                if (m.addr > PAGE_OFFSET) {
                        ret = -EFAULT;
                        goto batch_err;
                }

                if ((m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end) {
                        ret = -EFAULT;
                        goto batch_err;
                }

                p = m.arr;
                addr = m.addr;
                for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
                        if (get_user(mfn, p))
                                return -EFAULT;
#ifdef __ia64__
                        ret = remap_pfn_range(vma,
                                              addr&PAGE_MASK,
                                              mfn,
                                              1<<PAGE_SHIFT,
                                              vma->vm_page_prot);
                        if (ret < 0)
                                goto batch_err;
#else

                        ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
                        if (ret)
                                goto batch_err;

                        u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
                        u.ptr = ptep;

                        if (HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0)
                                put_user(0xF0000000 | mfn, p);
#endif
                }

                ret = 0;
                break;

        batch_err:
                printk("batch_err ret=%d vma=%p addr=%lx "
                       "num=%d arr=%p %lx-%lx\n",
                       ret, vma, m.addr, m.num, m.arr,
                       vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
                break;
        }
        break;
#endif

#ifndef __ia64__
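        /*
         * Report which machine frames back the kernel mapping of the
         * machine_to_phys table.  The table is mapped with superpages, so
         * the walk stops at the PMD: the base MFN is read out of the PMD
         * entry and pte_index() supplies the offset of each 2MB step
         * within a 4MB (32-bit non-PAE) superpage.
         */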
        case IOCTL_PRIVCMD_GET_MACH2PHYS_MFNS: {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                unsigned long m2pv, m2p_mfn;
                privcmd_m2pmfns_t m;
                unsigned long *p;
                int i;

                if (copy_from_user(&m, (void *)data, sizeof(m)))
                        return -EFAULT;

                m2pv = (unsigned long)machine_to_phys_mapping;

                p = m.arr;

                for (i=0; i < m.num; i++) {
                        pgd = pgd_offset_k(m2pv);
                        pud = pud_offset(pgd, m2pv);
                        pmd = pmd_offset(pud, m2pv);
                        m2p_mfn = (*(uint64_t *)pmd >> PAGE_SHIFT)&0xFFFFFFFF;
                        m2p_mfn += pte_index(m2pv);

                        if (put_user(m2p_mfn, p + i))
                                return -EFAULT;

                        m2pv += (1 << 21);
                }

                ret = 0;
                break;
        }
        break;
#endif

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
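
/*
 * mmap() on the privcmd node only marks the VMA; the pages themselves are
 * populated afterwards by the MMAP/MMAPBATCH ioctls above.
 */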
static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
{
        /* DONTCOPY is essential for Xen as copy_page_range is broken. */
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;

        return 0;
}

static struct file_operations privcmd_file_ops = {
        .ioctl = privcmd_ioctl,
        .mmap = privcmd_mmap,
};

static int __init privcmd_init(void)
{
        privcmd_intf = create_xen_proc_entry("privcmd", 0400);
        if (privcmd_intf != NULL)
                privcmd_intf->proc_fops = &privcmd_file_ops;

        return 0;
}

__initcall(privcmd_init);

/*
 * Local variables:
 * c-file-style: "linux"
 * indent-tabs-mode: t
 * c-indent-level: 8
 * c-basic-offset: 8
 * tab-width: 8
 * End:
 */