ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_hypercall.c @ 9563:9bee4875a848

Rename sched_op->sched_op_compat and sched_op_new->sched_op
after Christian's interface cleanup.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Apr 01 11:08:50 2006 +0100 (2006-04-01)
parents 0ed4a312765b
children 42a8e3101c6c
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /*
3 * vmx_hypercall.c: handle hypercalls from the domain
4 * Copyright (c) 2005, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 *
19 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
20 */
22 #include <xen/config.h>
23 #include <xen/errno.h>
24 #include <asm/vmx_vcpu.h>
25 #include <xen/guest_access.h>
26 #include <public/event_channel.h>
27 #include <asm/vmmu.h>
28 #include <asm/tlb.h>
29 #include <asm/regionreg.h>
30 #include <asm/page.h>
31 #include <xen/mm.h>
32 #include <xen/multicall.h>
33 #include <xen/hypercall.h>
34 #include <public/version.h>
35 #include <asm/dom_fw.h>
36 #include <xen/domain.h>
38 extern long do_sched_op_compat(int cmd, unsigned long arg);
39 extern unsigned long domain_mpa_to_imva(struct domain *,unsigned long mpaddr);
41 void hyper_not_support(void)
42 {
43 VCPU *vcpu=current;
44 vcpu_set_gr(vcpu, 8, -1, 0);
45 vmx_vcpu_increment_iip(vcpu);
46 }
48 void hyper_mmu_update(void)
49 {
50 VCPU *vcpu=current;
51 u64 r32,r33,r34,r35,ret;
52 vcpu_get_gr_nat(vcpu,16,&r32);
53 vcpu_get_gr_nat(vcpu,17,&r33);
54 vcpu_get_gr_nat(vcpu,18,&r34);
55 vcpu_get_gr_nat(vcpu,19,&r35);
56 ret=vmx_do_mmu_update((mmu_update_t*)r32,r33,(u64 *)r34,r35);
57 vcpu_set_gr(vcpu, 8, ret, 0);
58 vmx_vcpu_increment_iip(vcpu);
59 }
61 void hyper_dom_mem_op(void)
62 {
63 VCPU *vcpu=current;
64 u64 r32,r33,r34,r35,r36;
65 u64 ret;
66 vcpu_get_gr_nat(vcpu,16,&r32);
67 vcpu_get_gr_nat(vcpu,17,&r33);
68 vcpu_get_gr_nat(vcpu,18,&r34);
69 vcpu_get_gr_nat(vcpu,19,&r35);
70 vcpu_get_gr_nat(vcpu,20,&r36);
71 // ret=do_dom_mem_op(r32,(u64 *)r33,r34,r35,r36);
72 ret = 0;
73 printf("do_dom_mem return value: %lx\n", ret);
74 vcpu_set_gr(vcpu, 8, ret, 0);
76 /* Hard to define a special return value to indicate hypercall restart.
77 * So just add a new mark, which is SMP safe
78 */
79 if (vcpu->arch.hypercall_continuation == 1)
80 vcpu->arch.hypercall_continuation = 0;
81 else
82 vmx_vcpu_increment_iip(vcpu);
83 }
86 void hyper_sched_op_compat(void)
87 {
88 VCPU *vcpu=current;
89 u64 r32,r33,ret;
90 vcpu_get_gr_nat(vcpu,16,&r32);
91 vcpu_get_gr_nat(vcpu,17,&r33);
92 ret=do_sched_op_compat(r32,r33);
93 vcpu_set_gr(vcpu, 8, ret, 0);
95 vmx_vcpu_increment_iip(vcpu);
96 }
98 void hyper_dom0_op(void)
99 {
100 VCPU *vcpu=current;
101 u64 r32,ret;
102 vcpu_get_gr_nat(vcpu,16,&r32);
103 ret=do_dom0_op(guest_handle_from_ptr(r32, dom0_op_t));
104 vcpu_set_gr(vcpu, 8, ret, 0);
106 vmx_vcpu_increment_iip(vcpu);
107 }
109 void hyper_event_channel_op(void)
110 {
111 VCPU *vcpu=current;
112 u64 r32,ret;
113 vcpu_get_gr_nat(vcpu,16,&r32);
114 ret=do_event_channel_op(guest_handle_from_ptr(r32, evtchn_op_t));
115 vcpu_set_gr(vcpu, 8, ret, 0);
116 vmx_vcpu_increment_iip(vcpu);
117 }
119 void hyper_xen_version(void)
120 {
121 VCPU *vcpu=current;
122 u64 r32,r33,ret;
123 vcpu_get_gr_nat(vcpu,16,&r32);
124 vcpu_get_gr_nat(vcpu,17,&r33);
125 ret=do_xen_version((int )r32,guest_handle_from_ptr(r33, void));
126 vcpu_set_gr(vcpu, 8, ret, 0);
127 vmx_vcpu_increment_iip(vcpu);
128 }
129 /*
130 static int do_lock_page(VCPU *vcpu, u64 va, u64 lock)
131 {
132 ia64_rr rr;
133 thash_cb_t *hcb;
134 hcb = vmx_vcpu_get_vtlb(vcpu);
135 rr = vmx_vcpu_rr(vcpu, va);
136 return thash_lock_tc(hcb, va ,1U<<rr.ps, rr.rid, DSIDE_TLB, lock);
137 }
138 */
139 /*
140 * Lock guest page in vTLB, so that it's not relinquished by recycle
141 * session when HV is servicing that hypercall.
142 */
144 /*
145 void hyper_lock_page(void)
146 {
147 //TODO:
148 VCPU *vcpu=current;
149 u64 va,lock, ret;
150 vcpu_get_gr_nat(vcpu,16,&va);
151 vcpu_get_gr_nat(vcpu,17,&lock);
152 ret=do_lock_page(vcpu, va, lock);
153 vcpu_set_gr(vcpu, 8, ret, 0);
155 vmx_vcpu_increment_iip(vcpu);
156 }
157 */
159 static int do_set_shared_page(VCPU *vcpu, u64 gpa)
160 {
161 u64 o_info;
162 struct domain *d = vcpu->domain;
163 struct vcpu *v;
164 if(vcpu->domain!=dom0)
165 return -EPERM;
166 o_info = (u64)vcpu->domain->shared_info;
167 d->shared_info= (shared_info_t *)domain_mpa_to_imva(vcpu->domain, gpa);
169 /* Copy existing shared info into new page */
170 if (o_info) {
171 memcpy((void*)d->shared_info, (void*)o_info, PAGE_SIZE);
172 for_each_vcpu(d, v) {
173 v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id];
174 }
175 /* If original page belongs to xen heap, then relinguish back
176 * to xen heap. Or else, leave to domain itself to decide.
177 */
178 if (likely(IS_XEN_HEAP_FRAME(virt_to_page(o_info))))
179 free_xenheap_page((void *)o_info);
180 } else
181 memset(d->shared_info, 0, PAGE_SIZE);
182 return 0;
183 }
185 void hyper_set_shared_page(void)
186 {
187 VCPU *vcpu=current;
188 u64 gpa,ret;
189 vcpu_get_gr_nat(vcpu,16,&gpa);
191 ret=do_set_shared_page(vcpu, gpa);
192 vcpu_set_gr(vcpu, 8, ret, 0);
194 vmx_vcpu_increment_iip(vcpu);
195 }
197 /*
198 void hyper_grant_table_op(void)
199 {
200 VCPU *vcpu=current;
201 u64 r32,r33,r34,ret;
202 vcpu_get_gr_nat(vcpu,16,&r32);
203 vcpu_get_gr_nat(vcpu,17,&r33);
204 vcpu_get_gr_nat(vcpu,18,&r34);
206 ret=do_grant_table_op((unsigned int)r32, (void *)r33, (unsigned int)r34);
207 vcpu_set_gr(vcpu, 8, ret, 0);
208 }
209 */