ia64/xen-unstable

view xen/arch/ia64/mmio.c @ 5797:ca44d2dbb273

Intel's pre-bk->hg transition patches
Signed-off-by: Eddie Dong <Eddie.dong@intel.com>
Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
Signed-off-by: Kevin Tian <Kevin.tian@intel.com>
author djm@kirby.fc.hp.com
date Sat Jul 09 07:58:56 2005 -0700
parents 8651a99cdc09
children a83ac0806d6b
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * mmio.c: MMIO emulation components.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 * Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
 */
#include <linux/sched.h>
#include <asm/tlb.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <xen/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/privop.h>
#include <asm/types.h>
#include <public/io/ioreq.h>
#include <asm/mm.h>
#include <asm/vmx.h>
/*
struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
{
    int i;
    for (i = 0; mio_base[i].iot != NOT_IO; i++) {
        if (gpa >= mio_base[i].start && gpa <= mio_base[i].end)
            return &mio_base[i];
    }
    return NULL;
}
*/
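
/*
 * Editorial note on the layout assumed by the macros below: the SAPIC
 * Processor Interrupt Block (PIB) is a 2MB region whose lower half
 * (offset bit 20 clear) is the per-processor interrupt-delivery (IPI)
 * space, while the upper half carries the byte-wide special registers:
 * INTA at offset 0x1E0000 and XTP at 0x1E0008.
 */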
#define PIB_LOW_HALF(ofst)  (!((ofst) & (1 << 20)))
#define PIB_OFST_INTA       0x1E0000
#define PIB_OFST_XTP        0x1E0008

/* Defined below; declared here because pib_write() delivers IPIs. */
static int write_ipi(VCPU *vcpu, uint64_t addr, uint64_t value);
static void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
{
    switch (pib_off) {
    case PIB_OFST_INTA:
        panic("Undefined write on PIB INTA\n");
        break;
    case PIB_OFST_XTP:
        if (s == 1 && ma == 4 /* UC */) {
            vmx_vcpu_get_plat(vcpu)->xtp = *(uint8_t *)src;
        }
        else {
            panic("Undefined write on PIB XTP\n");
        }
        break;
    default:
        if (PIB_LOW_HALF(pib_off)) {    // lower half
            if (s != 8 || ma != 0x4 /* UC */) {
                panic("Undefined IPI-LHF write!\n");
            }
            else {
                write_ipi(vcpu, pib_off, *(uint64_t *)src);
                // TODO for SM-VP
            }
        }
        else {                          // upper half
            printf("IPI-UHF write %lx\n", pib_off);
            panic("Not supported yet for SM-VP\n");
        }
        break;
    }
}
static void pib_read(VCPU *vcpu, uint64_t pib_off, void *dest, size_t s, int ma)
{
    switch (pib_off) {
    case PIB_OFST_INTA:
        // todo --- emit on processor system bus.
        if (s == 1 && ma == 4) {    // 1 byte load
            // TODO: INTA read from IOSAPIC
        }
        else {
            panic("Undefined read on PIB INTA\n");
        }
        break;
    case PIB_OFST_XTP:
        if (s == 1 && ma == 4) {
            *((uint8_t *)dest) = vmx_vcpu_get_plat(vcpu)->xtp;
        }
        else {
            panic("Undefined read on PIB XTP\n");
        }
        break;
    default:
        if (PIB_LOW_HALF(pib_off)) {    // lower half
            if (s != 8 || ma != 4) {
                panic("Undefined IPI-LHF read!\n");
            }
            else {
#ifdef IPI_DEBUG
                printf("IPI-LHF read %lx\n", pib_off);
#endif
                *(uint64_t *)dest = 0;  // TODO for SM-VP
            }
        }
        else {                          // upper half
            if (s != 1 || ma != 4) {
                panic("Undefined PIB-UHF read!\n");
            }
            else {
#ifdef IPI_DEBUG
                printf("IPI-UHF read %lx\n", pib_off);
#endif
                *(uint8_t *)dest = 0;   // TODO for SM-VP
            }
        }
        break;
    }
}
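
/*
 * Editorial note: low_mmio_access() and legacy_io_access() below forward
 * the access to the external device model through the ioreq_t in the
 * page shared with the service domain: fill in the request, mark the
 * vcpu as waiting for I/O, post the event channel, then block in
 * vmx_wait_io() until the device model completes the request.
 */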
static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va;
    if (vio == 0) {
        panic("bad shared page: %lx", (unsigned long)vio);
    }
    p = &vio->vp_ioreq;
    p->addr = pa;
    p->size = 1 << s;           // s is the log2 of the access size in bytes
    p->count = 1;
    p->dir = dir;
    if (dir == IOREQ_WRITE)     // write
        p->u.data = *val;
    p->pdata_valid = 0;
    p->port_mm = 1;             // memory-mapped, not port I/O
    p->df = 0;

    set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    p->state = STATE_IOREQ_READY;
    evtchn_send(IOPACKET_PORT);
    vmx_wait_io();
    if (dir == IOREQ_READ) {    // read
        *val = p->u.data;
    }
    return;
}
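
/*
 * Editorial note: this mirrors the ia64 uncacheable I/O-port window as
 * this file assumes it, where each 4-byte port granule occupies its own
 * 4KB page; the legacy port number is rebuilt from the page index
 * (pa >> 12) times 4 plus the low two offset bits.
 */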
#define TO_LEGACY_IO(pa)  (((pa) >> 12 << 2) | ((pa) & 0x3))
static void legacy_io_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va;
    if (vio == 0) {
        panic("bad shared page: %lx", (unsigned long)vio);
    }
    p = &vio->vp_ioreq;
    p->addr = TO_LEGACY_IO(pa & 0x3ffffffUL);
    p->size = 1 << s;
    p->count = 1;
    p->dir = dir;
    if (dir == IOREQ_WRITE)     // write
        p->u.data = *val;
    p->pdata_valid = 0;
    p->port_mm = 0;             // port I/O, not memory-mapped
    p->df = 0;

    set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    p->state = STATE_IOREQ_READY;
    evtchn_send(IOPACKET_PORT);
    vmx_wait_io();
    if (dir == IOREQ_READ) {    // read
        *val = p->u.data;
    }
    return;
}
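
/*
 * Editorial note: mmio_access() classifies the guest physical frame via
 * __gpfn_is_io() and dispatches on the result: PIB offsets are emulated
 * locally, guest firmware (GFW) accesses are silently ignored, and
 * frame-buffer, low-MMIO and legacy-I/O ranges go to the device model.
 */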
static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int dir)
{
    struct virtual_platform_def *v_plat;
    //mmio_type_t iot;
    unsigned long iot;

    iot = __gpfn_is_io(vcpu->domain, src_pa >> PAGE_SHIFT);
    v_plat = vmx_vcpu_get_plat(vcpu);

    switch (iot) {
    case GPFN_PIB:
        if (!dir)   // IOREQ_WRITE
            pib_write(vcpu, dest, src_pa - v_plat->pib_base, s, ma);
        else
            pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
        break;
    case GPFN_GFW:
        // guest firmware region: access ignored
        break;
    case GPFN_FRAME_BUFFER:
    case GPFN_LOW_MMIO:
        low_mmio_access(vcpu, src_pa, dest, s, dir);
        break;
    case GPFN_LEGACY_IO:
        legacy_io_access(vcpu, src_pa, dest, s, dir);
        break;
    case GPFN_IOSAPIC:
    default:
        panic("Bad I/O access\n");
        break;
    }
    return;
}
/*
 * Read or write data in guest virtual address mode.
 */
/*
void
memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s)
{
    uint64_t pa;

    if (!vtlb->nomap)
        panic("Normal memory write shouldn't go to this point!");
    pa = PPN_2_PA(vtlb->ppn);
    pa += POFFSET((u64)dest, vtlb->ps);
    mmio_write(vcpu, src, pa, s, vtlb->ma);
}

void
memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s)
{
    uint64_t pa = (uint64_t)dest;
    int ma;

    if (pa & (1UL << 63)) {
        // UC
        ma = 4;
        pa <<= 1;
        pa >>= 1;
    }
    else {
        // WBL
        ma = 0;     // using WB for WBL
    }
    mmio_write(vcpu, src, pa, s, ma);
}

void
memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s)
{
    uint64_t pa;

    if (!vtlb->nomap)
        panic("Normal memory read shouldn't go to this point!");
    pa = PPN_2_PA(vtlb->ppn);
    pa += POFFSET((u64)src, vtlb->ps);
    mmio_read(vcpu, pa, dest, s, vtlb->ma);
}

void
memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s)
{
    uint64_t pa = (uint64_t)src;
    int ma;

    if (pa & (1UL << 63)) {
        // UC
        ma = 4;
        pa <<= 1;
        pa >>= 1;
    }
    else {
        // WBL
        ma = 0;     // using WB for WBL
    }
    mmio_read(vcpu, pa, dest, s, ma);
}
*/
/*
 * Deliver IPI message. (Only U-VP is supported now)
 *  offset: address offset to IPI space.
 *  value:  deliver value.
 */
static void deliver_ipi(VCPU *vcpu, uint64_t dm, uint64_t vector)
{
#ifdef IPI_DEBUG
    printf("deliver_ipi %lx %lx\n", dm, vector);
#endif
    switch (dm) {   // SAPIC delivery mode
    case 0:     // INT
        vmx_vcpu_pend_interrupt(vcpu, vector);
        break;
    case 2:     // PMI
        // TODO -- inject guest PMI
        panic("Inject guest PMI!\n");
        break;
    case 4:     // NMI
        vmx_vcpu_pend_interrupt(vcpu, 2);
        break;
    case 5:     // INIT
        // TODO -- inject guest INIT
        panic("Inject guest INIT!\n");
        break;
    case 7:     // ExtINT
        vmx_vcpu_pend_interrupt(vcpu, 0);
        break;
    case 1:     // reserved delivery modes
    case 3:
    case 6:
    default:
        panic("Deliver reserved IPI!\n");
        break;
    }
}
/*
 * TODO: Use hash table for the lookup.
 */
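/*
 * Editorial note: lid.id and lid.eid are the two bytes of the (virtual)
 * local SAPIC ID held in CR[LID]; together they address a single
 * processor, which is why a linear scan over d->vcpu[] suffices here.
 */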
static inline VCPU *lid_2_vcpu(struct domain *d, u64 id, u64 eid)
{
    int i;
    VCPU *vcpu;
    LID lid;

    for (i = 0; i < MAX_VIRT_CPUS; i++) {
        vcpu = d->vcpu[i];
        if (vcpu == NULL)   // skip unallocated vcpu slots
            continue;
        lid.val = VPD_CR(vcpu, lid);
        if (lid.id == id && lid.eid == eid) {
            return vcpu;
        }
    }
    return NULL;
}
/*
 * Execute write IPI op.
 */
static int write_ipi(VCPU *vcpu, uint64_t addr, uint64_t value)
{
    VCPU *target_cpu;

    target_cpu = lid_2_vcpu(vcpu->domain,
                            ((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
    if (target_cpu == NULL)
        panic("Unknown IPI cpu\n");
    if (target_cpu == vcpu) {
        // IPI to self
        deliver_ipi(vcpu, ((ipi_d_t)value).dm,
                    ((ipi_d_t)value).vector);
        return 1;
    }
    else {
        // TODO: send host IPI to inject guest SMP IPI interruption
        panic("SM-VP not supported yet!\n");
        return 0;
    }
}
/*
 * dir:       1 = read, 0 = write
 * inst_type: 0 = integer, 1 = floating point
 */
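/*
 * Editorial note: emulate_io_inst() re-fetches the faulting bundle at
 * cr.iip, selects the slot from psr.ri, and decodes it as an M-unit
 * memory op: major opcode 4 (M1/M4 integer load/store, with the access
 * size in the low two bits of x6 and x6 >> 2 distinguishing load from
 * store), or major opcode 6 with x6 == 3 (M6, ldfd).
 */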
extern IA64_BUNDLE __vmx_get_domain_bundle(u64 iip);
void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
{
    REGS *regs;
    IA64_BUNDLE bundle;
    int slot, dir, inst_type = 0;
    size_t size;
    u64 data, value, slot1a, slot1b;
    INST64 inst;

    regs = vcpu_regs(vcpu);
    bundle = __vmx_get_domain_bundle(regs->cr_iip);
    slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
    if (!slot) inst.inst = bundle.slot0;
    else if (slot == 1) {
        slot1a = bundle.slot1a;
        slot1b = bundle.slot1b;
        inst.inst = slot1a + (slot1b << 18);
    }
    else if (slot == 2) inst.inst = bundle.slot2;

    if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
        inst_type = 0;      // integer load/store
        size = (inst.M1.x6 & 0x3);
        if ((inst.M1.x6 >> 2) > 0xb) {          // write
            vmx_vcpu_get_gr(vcpu, inst.M4.r2, &data);
            dir = IOREQ_WRITE;
        } else if ((inst.M1.x6 >> 2) < 0xb) {   // read
            vmx_vcpu_get_gr(vcpu, inst.M1.r1, &value);
            dir = IOREQ_READ;
        } else {
            printf("This memory access instruction can't be emulated (case one): %lx\n", inst.inst);
            while (1);
        }
    } else if (inst.M6.major == 6 && inst.M6.m == 0 && inst.M6.x == 0 && inst.M6.x6 == 3) {
        inst_type = 1;      // floating point (ldfd)
        dir = IOREQ_READ;
        size = 3;           // 8-byte access
    } else {
        printf("This memory access instruction can't be emulated (case two): %lx\n", inst.inst);
        while (1);
    }

    if (dir == IOREQ_WRITE) {
        mmio_access(vcpu, padr, &data, size, ma, dir);
    } else {
        mmio_access(vcpu, padr, &data, size, ma, dir);
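        /*
         * Editorial note: for loads narrower than 8 bytes, splice the
         * fetched low bytes into the previous contents of the target
         * register (read into `value` above); only the accessed bytes
         * are replaced.
         */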
        if (size == 0)
            data = (value & 0xffffffffffffff00U) | (data & 0xffU);
        else if (size == 1)
            data = (value & 0xffffffffffff0000U) | (data & 0xffffU);
        else if (size == 2)
            data = (value & 0xffffffff00000000U) | (data & 0xffffffffU);

        if (inst_type == 0) {   // integer
            vmx_vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
        } else {
            panic("ldfd is not supported yet!");
            /* switch (inst.M6.f1) {
            case 6:
                regs->f6 = (struct ia64_fpreg)data;
            case 7:
                regs->f7 = (struct ia64_fpreg)data;
            case 8:
                regs->f8 = (struct ia64_fpreg)data;
            case 9:
                regs->f9 = (struct ia64_fpreg)data;
            case 10:
                regs->f10 = (struct ia64_fpreg)data;
            case 11:
                regs->f11 = (struct ia64_fpreg)data;
            default:
                ia64_ldfs(inst.M6.f1, &data);
            }
            */
        }
    }
    vmx_vcpu_increment_iip(vcpu);
}