ia64/xen-unstable

view linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c @ 6491:7296ed60874f

Fix user-space four- and five-argument hypercalls on
x86/64. This fixes domU building on x86/64 (bug #196).
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Aug 30 14:14:31 2005 +0000 (2005-08-30)
parents 37e9c9cd6c14
children b043928b0873
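
The change below is in the IOCTL_PRIVCMD_HYPERCALL path, through which privileged user-space tools hand arbitrary hypercalls to Xen via the privcmd device. For orientation, a minimal sketch of such a caller follows; it assumes the proc node created by privcmd_init() appears as /proc/xen/privcmd and that the privcmd header (included here via its in-tree path) supplies privcmd_hypercall_t and IOCTL_PRIVCMD_HYPERCALL. The hypercall number and arguments are placeholders, not a real invocation.

/* Illustrative user-space sketch only -- not part of the file below. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm-xen/linux-public/privcmd.h> /* in-tree path; adjust for a userspace build */

int do_hypercall(unsigned long op,
                 unsigned long a1, unsigned long a2, unsigned long a3,
                 unsigned long a4, unsigned long a5)
{
    privcmd_hypercall_t call;
    int fd, ret;

    call.op     = op;
    call.arg[0] = a1;  call.arg[1] = a2;  call.arg[2] = a3;
    call.arg[3] = a4;  call.arg[4] = a5;  /* args 4 and 5 are what this changeset fixes on x86/64 */

    /* Path assumed from create_xen_proc_entry("privcmd", 0400) in privcmd_init(). */
    fd = open("/proc/xen/privcmd", O_RDWR);
    if (fd < 0)
        return -1;

    /* The ioctl 'data' argument is a pointer to the hypercall descriptor. */
    ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
    close(fd);
    return ret;
}
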
line source
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/kthread.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm-xen/linux-public/privcmd.h>
#include <asm-xen/xen-public/dom0_ops.h>
#include <asm-xen/xen_proc.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#define pud_t             pgd_t
#define pud_offset(d, va) d
#endif
static struct proc_dir_entry *privcmd_intf;

static int privcmd_ioctl(struct inode *inode, struct file *file,
                         unsigned int cmd, unsigned long data)
{
    int ret = -ENOSYS;

    switch ( cmd )
    {
    case IOCTL_PRIVCMD_HYPERCALL:
    {
        privcmd_hypercall_t hypercall;

        if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
            return -EFAULT;

#if defined(__i386__)
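        /*
         * 32-bit hypercall convention, as used by the asm below: the
         * hypercall number goes in %eax and up to five arguments in %ebx,
         * %ecx, %edx, %esi and %edi; TRAP_INSTR is the hypercall trap and
         * the result comes back in %eax.  The argument registers are saved
         * and restored around the trap.
         */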
        __asm__ __volatile__ (
            "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
            "movl  4(%%eax),%%ebx ;"
            "movl  8(%%eax),%%ecx ;"
            "movl 12(%%eax),%%edx ;"
            "movl 16(%%eax),%%esi ;"
            "movl 20(%%eax),%%edi ;"
            "movl   (%%eax),%%eax ;"
            TRAP_INSTR "; "
            "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
            : "=a" (ret) : "0" (&hypercall) : "memory" );
#elif defined (__x86_64__)
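        /*
         * 64-bit hypercall convention: the hypercall number goes in %rax
         * and the five arguments in %rdi, %rsi, %rdx, %r10 and %r8.  GCC
         * inline asm has no register constraints for %r10/%r8, so arguments
         * four and five are passed via "g" operands and moved into place
         * explicitly -- this is the four/five-argument fix this changeset
         * makes.  %rcx and %r11 are clobbered by the trap instruction.
         */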
        {
            long ign1, ign2, ign3;
            __asm__ __volatile__ (
                "movq %8,%%r10; movq %9,%%r8;" TRAP_INSTR
                : "=a" (ret), "=D" (ign1), "=S" (ign2), "=d" (ign3)
                : "0" ((unsigned long)hypercall.op),
                  "1" ((unsigned long)hypercall.arg[0]),
                  "2" ((unsigned long)hypercall.arg[1]),
                  "3" ((unsigned long)hypercall.arg[2]),
                  "g" ((unsigned long)hypercall.arg[3]),
                  "g" ((unsigned long)hypercall.arg[4])
                : "r11","rcx","r8","r10","memory");
        }
#endif
    }
    break;

    case IOCTL_PRIVCMD_INITDOMAIN_EVTCHN:
    {
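        /* Report the event-channel port used by the initial domain's
         * control-interface domain controller. */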
        extern int initdom_ctrlif_domcontroller_port;
        ret = initdom_ctrlif_domcontroller_port;
    }
    break;

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
    case IOCTL_PRIVCMD_MMAP:
    {
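        /*
         * Map ranges of machine frames into this process's address space.
         * Userspace passes a privcmd_mmap_t describing an array of
         * privcmd_mmap_entry_t records; each record asks for 'npages'
         * frames starting at 'mfn' to be mapped at virtual address 'va',
         * which must lie within an already-established VMA.  The array is
         * copied in and processed in chunks of PRIVCMD_MMAP_SZ entries.
         */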
#define PRIVCMD_MMAP_SZ 32
        privcmd_mmap_t mmapcmd;
        privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
        int i, rc;

        if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) )
            return -EFAULT;

        p = mmapcmd.entry;

        for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ)
        {
            int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
                PRIVCMD_MMAP_SZ:(mmapcmd.num-i);

            if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
                return -EFAULT;

            for ( j = 0; j < n; j++ )
            {
                struct vm_area_struct *vma =
                    find_vma( current->mm, msg[j].va );

                if ( !vma )
                    return -EINVAL;

                if ( msg[j].va > PAGE_OFFSET )
                    return -EINVAL;

                if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
                    return -EINVAL;

                if ( (rc = direct_remap_area_pages(vma->vm_mm,
                                                   msg[j].va&PAGE_MASK,
                                                   msg[j].mfn<<PAGE_SHIFT,
                                                   msg[j].npages<<PAGE_SHIFT,
                                                   vma->vm_page_prot,
                                                   mmapcmd.dom)) < 0 )
                    return rc;
            }
        }
        ret = 0;
    }
    break;

    case IOCTL_PRIVCMD_MMAPBATCH:
    {
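        /*
         * Batched variant: userspace passes a privcmd_mmapbatch_t with an
         * array of 'num' machine frame numbers to be mapped, one page each,
         * at consecutive addresses starting at 'addr'.  Each frame is
         * installed with an explicit MMU-update hypercall against the PTE
         * for that address; if the hypervisor rejects a frame, the entry in
         * the user array is overwritten with 0xF0000000 | mfn so the caller
         * can see which pages failed.
         */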
        mmu_update_t u;
        privcmd_mmapbatch_t m;
        struct vm_area_struct *vma = NULL;
        unsigned long *p, addr;
        unsigned long mfn, ptep;
        int i;

        if ( copy_from_user(&m, (void *)data, sizeof(m)) )
        { ret = -EFAULT; goto batch_err; }

        vma = find_vma( current->mm, m.addr );

        if ( !vma )
        { ret = -EINVAL; goto batch_err; }

        if ( m.addr > PAGE_OFFSET )
        { ret = -EFAULT; goto batch_err; }

        if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
        { ret = -EFAULT; goto batch_err; }

        p = m.arr;
        addr = m.addr;
        for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
        {
            if ( get_user(mfn, p) )
                return -EFAULT;

            ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
            if (ret)
                goto batch_err;

            u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
            u.ptr = ptep;

            if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0) )
                put_user(0xF0000000 | mfn, p);
        }

        ret = 0;
        break;

    batch_err:
        printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n",
               ret, vma, m.addr, m.num, m.arr,
               vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
        break;
    }
    break;
#endif

    case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
    {
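        /*
         * Return the machine frame number at which the machine-to-physical
         * table begins: walk the kernel page tables (pgd -> pud -> pmd) for
         * its virtual address and read the frame number straight out of the
         * pmd entry that maps it.
         */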
        unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
        pgd_t *pgd = pgd_offset_k(m2pv);
        pud_t *pud = pud_offset(pgd, m2pv);
        pmd_t *pmd = pmd_offset(pud, m2pv);
        unsigned long m2p_start_mfn = (*(unsigned long *)pmd) >> PAGE_SHIFT;
        ret = put_user(m2p_start_mfn, (unsigned long *)data) ? -EFAULT: 0;
    }
    break;

    case IOCTL_PRIVCMD_INITDOMAIN_STORE:
    {
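        /*
         * First-time setup of the xenstore interface for the initial
         * domain: allocate a permanent shared page, record the event
         * channel passed in 'data', publish the page's machine frame in
         * xen_start_info, and start the xenbus probe thread.  On later
         * calls the existing store_mfn is simply returned.
         */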
        extern int do_xenbus_probe(void*);
        unsigned long page;

        if (xen_start_info.store_evtchn != 0) {
            ret = xen_start_info.store_mfn;
            break;
        }

        /* Allocate page. */
        page = get_zeroed_page(GFP_KERNEL);
        if (!page) {
            ret = -ENOMEM;
            break;
        }

        /* We don't refcount properly, so mark the page reserved
         * (this allocation is permanent). */
        SetPageReserved(virt_to_page(page));

        /* Initial connect: set up the event channel and the shared page. */
        xen_start_info.store_evtchn = data;
        xen_start_info.store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >>
                                              PAGE_SHIFT);
        ret = xen_start_info.store_mfn;

        /* We return now; the kthread will wait for the daemon to answer. */
        kthread_run(do_xenbus_probe, NULL, "xenbus_probe");
    }
    break;

    default:
        ret = -EINVAL;
        break;
    }

    return ret;
}

static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
{
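    /* No pages are mapped here; mappings are installed later through the
     * MMAP/MMAPBATCH ioctls.  This handler only sets the VMA flags those
     * mappings rely on. */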
    /* DONTCOPY is essential for Xen as copy_page_range is broken. */
    vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;

    return 0;
}

static struct file_operations privcmd_file_ops = {
    ioctl : privcmd_ioctl,
    mmap  : privcmd_mmap
};

static int __init privcmd_init(void)
{
    privcmd_intf = create_xen_proc_entry("privcmd", 0400);
    if ( privcmd_intf != NULL )
        privcmd_intf->proc_fops = &privcmd_file_ops;

    return 0;
}

__initcall(privcmd_init);