direct-io.hg

view xen/arch/ia64/vmx/vmx_hypercall.c @ 11342:1317833f657b

[IA64] Fix comment of vmx_hypercall.c

Signed-off-by: Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
Signed-off-by: Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
author awilliam@xenbuild.aw
date Mon Aug 28 13:12:42 2006 -0600 (2006-08-28)
parents c232365128cf
children 26ba157bef06
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_hypercall.c: handling hypercall from domain
 * Copyright (c) 2005, Intel Corporation.
 * Copyright (c) 2006, Fujitsu Limited.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Tsunehisa Doi (Doi.Tsunehisa@jp.fujitsu.com)
 * Tomonari Horikoshi (t.horikoshi@jp.fujitsu.com)
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <asm/vmx_vcpu.h>
#include <xen/guest_access.h>
#include <public/event_channel.h>
#include <asm/vmmu.h>
#include <asm/tlb.h>
#include <asm/regionreg.h>
#include <asm/page.h>
#include <xen/mm.h>
#include <xen/multicall.h>
#include <xen/hypercall.h>
#include <public/version.h>
#include <asm/dom_fw.h>
#include <xen/domain.h>
#include <xen/compile.h>
#include <xen/event.h>

static void
vmx_free_pages(unsigned long pgaddr, int npg)
{
    for (; npg > 0; npg--, pgaddr += PAGE_SIZE) {
        /* If the original page belongs to the xen heap, relinquish it back
         * to the xen heap.  Otherwise, leave it to the domain itself to
         * decide.
         */
        if (likely(IS_XEN_HEAP_FRAME(virt_to_page(pgaddr)))) {
            free_domheap_page(virt_to_page(pgaddr));
            free_xenheap_page((void *)pgaddr);
        }
        else {
            put_page(virt_to_page(pgaddr));
        }
    }
}

/* Relocate the grant-table shared area to the guest-supplied frames. */
static int
vmx_gnttab_setup_table(unsigned long frame_pa, unsigned long nr_frames)
{
    struct domain *d = current->domain;
    unsigned long o_grant_shared, pgaddr;

    if ((nr_frames != NR_GRANT_FRAMES) || (frame_pa & (PAGE_SIZE - 1))) {
        return -EINVAL;
    }

    pgaddr = domain_mpa_to_imva(d, frame_pa);
    if (pgaddr == 0) {
        return -EFAULT;
    }

    o_grant_shared = (unsigned long)d->grant_table->shared;
    d->grant_table->shared = (struct grant_entry *)pgaddr;

    /* Copy existing grant table into new page */
    if (o_grant_shared) {
        memcpy((void *)d->grant_table->shared,
               (void *)o_grant_shared, PAGE_SIZE * nr_frames);
        vmx_free_pages(o_grant_shared, nr_frames);
    }
    else {
        memset((void *)d->grant_table->shared, 0, PAGE_SIZE * nr_frames);
    }
    return 0;
}

/* Relocate the domain's shared_info page to a guest-supplied frame. */
static int
vmx_setup_shared_info_page(unsigned long gpa)
{
    VCPU *vcpu = current;
    struct domain *d = vcpu->domain;
    unsigned long o_info, pgaddr;
    struct vcpu *v;

    if (gpa & (PAGE_SIZE - 1)) {
        return -EINVAL;
    }

    pgaddr = domain_mpa_to_imva(d, gpa);
    if (pgaddr == 0) {
        return -EFAULT;
    }

    o_info = (u64)d->shared_info;
    d->shared_info = (shared_info_t *)pgaddr;

    /* Copy existing shared info into new page */
    if (o_info) {
        memcpy((void *)d->shared_info, (void *)o_info, PAGE_SIZE);
        for_each_vcpu(d, v) {
            v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id];
        }
        vmx_free_pages(o_info, 1);
    }
    else {
        memset((void *)d->shared_info, 0, PAGE_SIZE);
    }
    return 0;
}

/* Top-level dispatcher for the hvm_op hypercall (HVMOP_* sub-operations). */
long
do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch (op) {
    case HVMOP_set_param:
    case HVMOP_get_param:
    {
        struct xen_hvm_param a;
        struct domain *d;

        if (copy_from_guest(&a, arg, 1))
            return -EFAULT;

        if (a.index >= HVM_NR_PARAMS)
            return -EINVAL;

        if (a.domid == DOMID_SELF) {
            get_knownalive_domain(current->domain);
            d = current->domain;
        }
        else if (IS_PRIV(current->domain)) {
            d = find_domain_by_id(a.domid);
            if (d == NULL)
                return -ESRCH;
        }
        else
            return -EPERM;

        if (op == HVMOP_set_param) {
            d->arch.hvm_domain.params[a.index] = a.value;
            rc = 0;
        }
        else {
            a.value = d->arch.hvm_domain.params[a.index];
            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
        }

        put_domain(d);
        break;
    }

    case HVMOP_setup_gnttab_table:
    case HVMOP_setup_shared_info_page:
    {
        struct xen_hvm_setup a;

        if (copy_from_guest(&a, arg, 1))
            return -EFAULT;

        switch (op) {
        case HVMOP_setup_gnttab_table:
            printk("vmx_gnttab_setup_table: frame_pa=%#lx, "
                   "nr_frames=%ld\n", a.arg1, a.arg2);
            return vmx_gnttab_setup_table(a.arg1, a.arg2);
        case HVMOP_setup_shared_info_page:
            printk("vmx_setup_shared_info_page: gpa=0x%lx\n", a.arg1);
            return vmx_setup_shared_info_page(a.arg1);
        }
        break;
    }

    default:
        DPRINTK("Bad HVM op %ld.\n", op);
        rc = -ENOSYS;
    }

    return rc;
}
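
For reference, the guest side of the two setup sub-ops looks roughly like the sketch below. This is not code from the tree: xen_hvm_op() stands in for whatever two-argument __HYPERVISOR_hvm_op wrapper the guest kernel provides, and the layout of struct xen_hvm_setup (arg1/arg2) is inferred from the handler above. The address passed in arg1 is guest-physical and must be page aligned, and the grant-table call must pass exactly NR_GRANT_FRAMES frames, matching the checks in vmx_gnttab_setup_table() and vmx_setup_shared_info_page().

/* Guest-side sketch (hypothetical wrapper name and call site). */
static int setup_xen_pages(unsigned long gnttab_gpa, unsigned long shinfo_gpa)
{
    struct xen_hvm_setup a;
    int rc;

    /* Hand the hypervisor a page-aligned guest-physical area for the
     * grant table; the handler rejects anything but NR_GRANT_FRAMES. */
    a.arg1 = gnttab_gpa;
    a.arg2 = NR_GRANT_FRAMES;
    rc = xen_hvm_op(HVMOP_setup_gnttab_table, &a);   /* hypothetical wrapper */
    if (rc)
        return rc;

    /* Point the hypervisor at the page that should back shared_info. */
    a.arg1 = shinfo_gpa;
    a.arg2 = 0;                                      /* unused by this sub-op */
    return xen_hvm_op(HVMOP_setup_shared_info_page, &a);
}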