ia64/xen-unstable

view linux-2.6.11-xen-sparse/drivers/xen/privcmd/privcmd.c @ 4765:aee6d65586c1

bitkeeper revision 1.1389.5.14 (4279091dnpBW4lj9MvVqawaMwCC8pA)

initialize privcmd even in domU in preparation for access control check-in
author smh22@firebug.cl.cam.ac.uk
date Wed May 04 17:40:45 2005 +0000 (2005-05-04)
parents 608f95d3df77
children 39bfbd5ae9b8
line source
1 /******************************************************************************
2 * privcmd.c
3 *
4 * Interface to privileged domain-0 commands.
5 *
6 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
7 */
9 #include <linux/config.h>
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
13 #include <linux/string.h>
14 #include <linux/errno.h>
15 #include <linux/mm.h>
16 #include <linux/mman.h>
17 #include <linux/swap.h>
18 #include <linux/smp_lock.h>
19 #include <linux/highmem.h>
20 #include <linux/pagemap.h>
21 #include <linux/seq_file.h>
23 #include <asm/pgalloc.h>
24 #include <asm/pgtable.h>
25 #include <asm/uaccess.h>
26 #include <asm/tlb.h>
27 #include <asm-xen/linux-public/privcmd.h>
28 #include <asm-xen/xen-public/dom0_ops.h>
29 #include <asm-xen/xen_proc.h>
31 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
32 #define pud_t pgd_t
33 #define pud_offset(d, va) d
34 #endif
36 static struct proc_dir_entry *privcmd_intf;
38 static int privcmd_ioctl(struct inode *inode, struct file *file,
39 unsigned int cmd, unsigned long data)
40 {
41 int ret = -ENOSYS;
43 switch ( cmd )
44 {
45 case IOCTL_PRIVCMD_HYPERCALL:
46 {
47 privcmd_hypercall_t hypercall;
49 if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
50 return -EFAULT;
52 #if defined(__i386__)
53 __asm__ __volatile__ (
54 "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
55 "movl 4(%%eax),%%ebx ;"
56 "movl 8(%%eax),%%ecx ;"
57 "movl 12(%%eax),%%edx ;"
58 "movl 16(%%eax),%%esi ;"
59 "movl 20(%%eax),%%edi ;"
60 "movl (%%eax),%%eax ;"
61 TRAP_INSTR "; "
62 "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
63 : "=a" (ret) : "0" (&hypercall) : "memory" );
64 #elif defined (__x86_64__)
65 __asm__ __volatile__ (
66 "movq %5,%%r10; movq %6,%%r8;" TRAP_INSTR
67 : "=a" (ret)
68 : "a" ((unsigned long)hypercall.op),
69 "D" ((unsigned long)hypercall.arg[0]),
70 "S" ((unsigned long)hypercall.arg[1]),
71 "d" ((unsigned long)hypercall.arg[2]),
72 "g" ((unsigned long)hypercall.arg[3]),
73 "g" ((unsigned long)hypercall.arg[4])
74 : "r11","rcx","r8","r10","memory");
75 #endif
76 }
77 break;
79 case IOCTL_PRIVCMD_INITDOMAIN_EVTCHN:
80 {
81 extern int initdom_ctrlif_domcontroller_port;
82 ret = initdom_ctrlif_domcontroller_port;
83 }
84 break;
86 #if defined(CONFIG_XEN_PRIVILEGED_GUEST)
87 case IOCTL_PRIVCMD_MMAP:
88 {
89 #define PRIVCMD_MMAP_SZ 32
90 privcmd_mmap_t mmapcmd;
91 privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
92 int i, rc;
94 if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) )
95 return -EFAULT;
97 p = mmapcmd.entry;
99 for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ)
100 {
101 int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
102 PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
105 if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
106 return -EFAULT;
108 for ( j = 0; j < n; j++ )
109 {
110 struct vm_area_struct *vma =
111 find_vma( current->mm, msg[j].va );
113 if ( !vma )
114 return -EINVAL;
116 if ( msg[j].va > PAGE_OFFSET )
117 return -EINVAL;
119 if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
120 return -EINVAL;
122 if ( (rc = direct_remap_area_pages(vma->vm_mm,
123 msg[j].va&PAGE_MASK,
124 msg[j].mfn<<PAGE_SHIFT,
125 msg[j].npages<<PAGE_SHIFT,
126 vma->vm_page_prot,
127 mmapcmd.dom)) < 0 )
128 return rc;
129 }
130 }
131 ret = 0;
132 }
133 break;
135 case IOCTL_PRIVCMD_MMAPBATCH:
136 {
137 mmu_update_t u;
138 privcmd_mmapbatch_t m;
139 struct vm_area_struct *vma = NULL;
140 unsigned long *p, addr;
141 unsigned long mfn;
142 int i;
144 if ( copy_from_user(&m, (void *)data, sizeof(m)) )
145 { ret = -EFAULT; goto batch_err; }
147 vma = find_vma( current->mm, m.addr );
149 if ( !vma )
150 { ret = -EINVAL; goto batch_err; }
152 if ( m.addr > PAGE_OFFSET )
153 { ret = -EFAULT; goto batch_err; }
155 if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
156 { ret = -EFAULT; goto batch_err; }
158 p = m.arr;
159 addr = m.addr;
160 for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
161 {
162 if ( get_user(mfn, p) )
163 return -EFAULT;
165 u.val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot);
167 __direct_remap_area_pages(vma->vm_mm,
168 addr,
169 PAGE_SIZE,
170 &u);
172 if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0) )
173 put_user(0xF0000000 | mfn, p);
174 }
176 ret = 0;
177 break;
179 batch_err:
180 printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n",
181 ret, vma, m.addr, m.num, m.arr,
182 vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
183 break;
184 }
185 break;
186 #endif
188 case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
189 {
190 unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
191 pgd_t *pgd = pgd_offset_k(m2pv);
192 pud_t *pud = pud_offset(pgd, m2pv);
193 pmd_t *pmd = pmd_offset(pud, m2pv);
194 unsigned long m2p_start_mfn = (*(unsigned long *)pmd) >> PAGE_SHIFT;
195 ret = put_user(m2p_start_mfn, (unsigned long *)data) ? -EFAULT: 0;
196 }
197 break;
199 default:
200 ret = -EINVAL;
201 break;
202 }
203 return ret;
204 }
206 static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
207 {
208 /* DONTCOPY is essential for Xen as copy_page_range is broken. */
209 vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
211 return 0;
212 }
214 static struct file_operations privcmd_file_ops = {
215 ioctl : privcmd_ioctl,
216 mmap: privcmd_mmap
217 };
220 static int __init privcmd_init(void)
221 {
222 privcmd_intf = create_xen_proc_entry("privcmd", 0400);
223 if ( privcmd_intf != NULL )
224 privcmd_intf->proc_fops = &privcmd_file_ops;
226 return 0;
227 }
229 __initcall(privcmd_init);