ia64/xen-unstable

view xen/arch/ia64/vmx/mmio.c @ 7720:b2ea26d2099a

Support the recent change that moved the virtual IOAPIC model into Xen for
ia64/VTI. ia64/VTI now creates links to arch/x86/dm/vmx_vioapic.c and
include/x86/vmx_vlapic.h.

First, there is a small change to the common virtual IOAPIC model so that it
can be used by both architectures; a compilation fix for the tools is also
included in that part. Second, there are ia64-specific changes to hook into
the common IOAPIC model.

With this patch applied on top of the latest xen-ia64-unstable tip, multiple
domains work again on Xen/IA64, including a domU and a VTI domain running
simultaneously.
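
For reference, the hook that this file consumes from the common model is
sketched below. Only the two function-pointer members are inferred from the
call sites in mmio_access(); the authoritative declaration lives in the
shared vmx_vioapic headers, so the exact member types and any additional
members here are assumptions.

    /* Assumed shape of the handler table exported by the common virtual
     * IOAPIC model; read_handler/write_handler mirror the call sites in
     * mmio_access() below, the rest is a sketch. */
    struct vmx_mmio_handler {
        u64  (*read_handler)(VCPU *vcpu, u64 addr, size_t length);
        void (*write_handler)(VCPU *vcpu, u64 addr, size_t length,
                              u64 value);
    };
    extern struct vmx_mmio_handler vioapic_mmio_handler;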

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Signed-off-by: Eddie Dong <eddie.dong@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Nov 09 14:53:12 2005 +0100 (2005-11-09)
parents 06d84bf87159
children 40fc727dd1c0
line source
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * mmio.c: MMIO emulation components.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 * Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
 */
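
/*
 * Overview: this file classifies a guest MMIO access by the type of the
 * target guest page frame (__gpfn_is_io) and routes it to one of three
 * emulation paths: the per-VCPU Processor Interrupt Block (pib_read/
 * pib_write, including IPI delivery), the shared virtual IOAPIC model
 * (vioapic_mmio_handler), or the external device model reached through a
 * shared ioreq page and event channel (low_mmio_access/legacy_io_access).
 * emulate_io_inst() at the bottom decodes the faulting instruction and
 * drives the whole path.
 */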
#include <linux/sched.h>
#include <asm/tlb.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/privop.h>
#include <asm/types.h>
#include <public/io/ioreq.h>
#include <asm/mm.h>
#include <asm/vmx.h>
/*
struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
{
    int i;
    for (i = 0; mio_base[i].iot != NOT_IO; i++) {
        if ( gpa >= mio_base[i].start && gpa <= mio_base[i].end )
            return &mio_base[i];
    }
    return NULL;
}
*/
#define PIB_LOW_HALF(ofst)  !(ofst & (1 << 20))
#define PIB_OFST_INTA       0x1E0000
#define PIB_OFST_XTP        0x1E0008

/* Defined further below but called from pib_write(); declare it up front
 * so the compiler does not fall back on an implicit declaration. */
static int write_ipi(VCPU *vcpu, uint64_t addr, uint64_t value);
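
/*
 * PIB (Processor Interrupt Block) layout as decoded below: offsets with
 * bit 20 clear form the lower half, which holds the per-processor IPI
 * delivery registers written with 8-byte UC stores; the upper half holds
 * the 1-byte INTA register at offset 0x1E0000 and the 1-byte XTP register
 * at offset 0x1E0008.
 */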
static void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
{
    switch (pib_off) {
    case PIB_OFST_INTA:
        panic("Undefined write on PIB INTA\n");
        break;
    case PIB_OFST_XTP:
        if ( s == 1 && ma == 4 /* UC */ ) {
            vmx_vcpu_get_plat(vcpu)->xtp = *(uint8_t *)src;
        }
        else {
            panic("Undefined write on PIB XTP\n");
        }
        break;
    default:
        if ( PIB_LOW_HALF(pib_off) ) {  // lower half
            if ( s != 8 || ma != 0x4 /* UC */ ) {
                panic("Undefined IPI-LHF write with s %d, ma %d!\n", s, ma);
            }
            else {
                write_ipi(vcpu, pib_off, *(uint64_t *)src);
                // TODO for SM-VP
            }
        }
        else {  // upper half
            printf("IPI-UHF write %lx\n", pib_off);
            panic("SM-VP not supported yet\n");
        }
        break;
    }
}
static void pib_read(VCPU *vcpu, uint64_t pib_off, void *dest, size_t s, int ma)
{
    switch (pib_off) {
    case PIB_OFST_INTA:
        // todo --- emit on processor system bus.
        if ( s == 1 && ma == 4 ) {  // 1 byte load
            // TODO: INTA read from IOSAPIC
        }
        else {
            panic("Undefined read on PIB INTA\n");
        }
        break;
    case PIB_OFST_XTP:
        if ( s == 1 && ma == 4 ) {
            *((uint8_t *)dest) = vmx_vcpu_get_plat(vcpu)->xtp;
        }
        else {
            panic("Undefined read on PIB XTP\n");
        }
        break;
    default:
        if ( PIB_LOW_HALF(pib_off) ) {  // lower half
            if ( s != 8 || ma != 4 ) {
                panic("Undefined IPI-LHF read!\n");
            }
            else {
#ifdef IPI_DEBUG
                printf("IPI-LHF read %lx\n", pib_off);
#endif
                *(uint64_t *)dest = 0;  // TODO for SM-VP
            }
        }
        else {  // upper half
            if ( s != 1 || ma != 4 ) {
                panic("Undefined PIB-UHF read!\n");
            }
            else {
#ifdef IPI_DEBUG
                printf("IPI-UHF read %lx\n", pib_off);
#endif
                *(uint8_t *)dest = 0;   // TODO for SM-VP
            }
        }
        break;
    }
}
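
/*
 * Hand an MMIO access that the hypervisor cannot emulate itself over to
 * the external device model: fill in the per-VCPU ioreq slot on the
 * shared page, mark the VCPU as waiting for I/O, kick the device model
 * through its event channel, and block in vmx_wait_io() until the reply
 * (for reads, in p->u.data) has been posted back.
 */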
static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_vio(v->domain, v->vcpu_id);
    if (vio == 0) {
        panic("bad shared page: %lx", (unsigned long)vio);
    }
    p = &vio->vp_ioreq;
    p->addr = pa;
    p->size = s;
    p->count = 1;
    p->dir = dir;
    if (dir == IOREQ_WRITE)     // write
        p->u.data = *val;
    p->pdata_valid = 0;
    p->type = 1;                // memory-mapped I/O request
    p->df = 0;

    set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    p->state = STATE_IOREQ_READY;
    evtchn_send(iopacket_port(v->domain));
    vmx_wait_io();
    if (dir == IOREQ_READ) {    // read
        *val = p->u.data;
    }
    return;
}
#define TO_LEGACY_IO(pa)  (((pa)>>12<<2)|((pa)&0x3))
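
/*
 * The legacy I/O window spreads each 4-byte group of ports over its own
 * 4KB page, so the port number is rebuilt as (page_index << 2) plus the
 * low two offset bits. For example, an access at window offset 0x33E000
 * decodes to ((0x33E000 >> 12) << 2) | 0 = 0xcf8, the PCI config-address
 * port.
 */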
static void legacy_io_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_vio(v->domain, v->vcpu_id);
    if (vio == 0) {
        panic("bad shared page: %lx", (unsigned long)vio);
    }
    p = &vio->vp_ioreq;
    p->addr = TO_LEGACY_IO(pa & 0x3ffffffUL);
    p->size = s;
    p->count = 1;
    p->dir = dir;
    if (dir == IOREQ_WRITE)     // write
        p->u.data = *val;
    p->pdata_valid = 0;
    p->type = 0;                // port I/O request
    p->df = 0;

    set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    p->state = STATE_IOREQ_READY;
    evtchn_send(iopacket_port(v->domain));

    vmx_wait_io();
    if (dir == IOREQ_READ) {    // read
        *val = p->u.data;
    }
#ifdef DEBUG_PCI
    if (dir == IOREQ_WRITE) {
        if (p->addr == 0xcf8UL)
            printk("Write 0xcf8, with val [0x%lx]\n", p->u.data);
    } else {
        if (p->addr == 0xcfcUL)
            printk("Read 0xcfc, with val [0x%lx]\n", p->u.data);
    }
#endif // DEBUG_PCI
    return;
}
extern struct vmx_mmio_handler vioapic_mmio_handler;
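
/*
 * Top-level MMIO dispatcher: classify the target page frame with
 * __gpfn_is_io() and route the access to PIB emulation, guest firmware
 * space (accesses are silently dropped), the shared virtual IOAPIC model,
 * or the device-model paths for frame-buffer/low MMIO and legacy I/O.
 */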
static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int dir)
{
    struct virutal_platform_def *v_plat;   /* sic -- matches the struct name in the headers */
    //mmio_type_t iot;
    unsigned long iot;
    struct vmx_mmio_handler *vioapic_handler = &vioapic_mmio_handler;
    iot = __gpfn_is_io(vcpu->domain, src_pa >> PAGE_SHIFT);
    v_plat = vmx_vcpu_get_plat(vcpu);

    switch (iot) {
    case GPFN_PIB:
        if (!dir)   // dir == IOREQ_WRITE
            pib_write(vcpu, dest, src_pa - v_plat->pib_base, s, ma);
        else
            pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
        break;
    case GPFN_GFW:
        break;
    case GPFN_IOSAPIC:
        if (!dir)
            vioapic_handler->write_handler(vcpu, src_pa, s, *dest);
        else
            *dest = vioapic_handler->read_handler(vcpu, src_pa, s);
        break;
    case GPFN_FRAME_BUFFER:
    case GPFN_LOW_MMIO:
        low_mmio_access(vcpu, src_pa, dest, s, dir);
        break;
    case GPFN_LEGACY_IO:
        legacy_io_access(vcpu, src_pa, dest, s, dir);
        break;
    default:
        panic("Bad I/O access\n");
        break;
    }
    return;
}
/*
 * Read or write data in guest virtual address mode.
 */
/*
void
memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s)
{
    uint64_t pa;

    if (!vtlb->nomap)
        panic("Normal memory write shouldn't go to this point!");
    pa = PPN_2_PA(vtlb->ppn);
    pa += POFFSET((u64)dest, vtlb->ps);
    mmio_write (vcpu, src, pa, s, vtlb->ma);
}

void
memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s)
{
    uint64_t pa = (uint64_t)dest;
    int ma;

    if ( pa & (1UL << 63) ) {
        // UC
        ma = 4;
        pa <<= 1;
        pa >>= 1;
    }
    else {
        // WBL
        ma = 0;     // using WB for WBL
    }
    mmio_write (vcpu, src, pa, s, ma);
}

void
memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s)
{
    uint64_t pa;

    if (!vtlb->nomap)
        panic("Normal memory read shouldn't go to this point!");
    pa = PPN_2_PA(vtlb->ppn);
    pa += POFFSET((u64)src, vtlb->ps);

    mmio_read(vcpu, pa, dest, s, vtlb->ma);
}

void
memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s)
{
    uint64_t pa = (uint64_t)src;
    int ma;

    if ( pa & (1UL << 63) ) {
        // UC
        ma = 4;
        pa <<= 1;
        pa >>= 1;
    }
    else {
        // WBL
        ma = 0;     // using WB for WBL
    }
    mmio_read(vcpu, pa, dest, s, ma);
}
*/
/*
 * Deliver an IPI message. (Only U-VP is supported now.)
 *   dm:     delivery mode.
 *   vector: interrupt vector.
 */
static void deliver_ipi (VCPU *vcpu, uint64_t dm, uint64_t vector)
{
#ifdef IPI_DEBUG
    printf ("deliver_ipi %lx %lx\n", dm, vector);
#endif
    switch ( dm ) {
    case 0:     // INT
        vmx_vcpu_pend_interrupt (vcpu, vector);
        break;
    case 2:     // PMI
        // TODO -- inject guest PMI
        panic ("Inject guest PMI!\n");
        break;
    case 4:     // NMI
        vmx_vcpu_pend_interrupt (vcpu, 2);
        break;
    case 5:     // INIT
        // TODO -- inject guest INIT
        panic ("Inject guest INIT!\n");
        break;
    case 7:     // ExtINT
        vmx_vcpu_pend_interrupt (vcpu, 0);
        break;
    case 1:
    case 3:
    case 6:
    default:
        panic ("Deliver reserved IPI!\n");
        break;
    }
}
/*
 * TODO: Use hash table for the lookup.
 */
static inline VCPU *lid_2_vcpu (struct domain *d, u64 id, u64 eid)
{
    int i;
    VCPU *vcpu;
    LID lid;

    for (i = 0; i < MAX_VIRT_CPUS; i++) {
        vcpu = d->vcpu[i];
        if (!vcpu)
            continue;
        lid.val = VCPU(vcpu, lid);
        if ( lid.id == id && lid.eid == eid ) {
            return vcpu;
        }
    }
    return NULL;
}
/*
 * Execute the write-IPI op.
 */
static int write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value)
{
    VCPU *target_cpu;

    target_cpu = lid_2_vcpu(vcpu->domain,
                            ((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
    if ( target_cpu == NULL ) panic("Unknown IPI cpu\n");
    if ( target_cpu == vcpu ) {
        // IPI to self
        deliver_ipi (vcpu, ((ipi_d_t)value).dm,
                     ((ipi_d_t)value).vector);
        return 1;
    }
    else {
        // TODO: send host IPI to inject guest SMP IPI interruption
        panic ("No SM-VP supported!\n");
        return 0;
    }
}
/*
 * dir:        1 = read, 0 = write
 * inst_type:  0 = integer, 1 = floating point
 */
extern IA64_BUNDLE __vmx_get_domain_bundle(u64 iip);
#define SL_INTEGER   0  // store/load integer
#define SL_FLOATING  1  // store/load floating
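
/*
 * Emulate the load/store that faulted on an I/O address: fetch the bundle
 * at cr.iip, select the slot from ipsr.ri, decode the M-unit form to
 * obtain the direction, access size and any base-register update, perform
 * the access through mmio_access(), merge a narrow read into the old
 * target-register value, write the result back, and advance the guest
 * instruction pointer.
 */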
void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
{
    REGS *regs;
    IA64_BUNDLE bundle;
    int slot, dir, inst_type;
    size_t size;
    u64 data, value, post_update, slot1a, slot1b, temp;
    INST64 inst;
    regs = vcpu_regs(vcpu);
    bundle = __vmx_get_domain_bundle(regs->cr_iip);
    slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
    if (!slot) inst.inst = bundle.slot0;
    else if (slot == 1) {
        slot1a = bundle.slot1a;
        slot1b = bundle.slot1b;
        inst.inst = slot1a + (slot1b << 18);
    }
    else if (slot == 2) inst.inst = bundle.slot2;

    // Integer Load/Store
    if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
        inst_type = SL_INTEGER;
        size = (inst.M1.x6 & 0x3);
        if ((inst.M1.x6 >> 2) > 0xb) {          // store
            dir = IOREQ_WRITE;
            vcpu_get_gr_nat(vcpu, inst.M4.r2, &data);
        } else if ((inst.M1.x6 >> 2) < 0xb) {   // load
            dir = IOREQ_READ;
            vcpu_get_gr_nat(vcpu, inst.M1.r1, &value);
        }
    }
    // Integer Load + Reg update
    else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
        inst_type = SL_INTEGER;
        dir = IOREQ_READ;                       // load
        size = (inst.M2.x6 & 0x3);
        vcpu_get_gr_nat(vcpu, inst.M2.r1, &value);
        vcpu_get_gr_nat(vcpu, inst.M2.r3, &temp);
        vcpu_get_gr_nat(vcpu, inst.M2.r2, &post_update);
        temp += post_update;
        vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
    }
    // Integer Load/Store + Imm update
    else if (inst.M3.major == 5) {
        inst_type = SL_INTEGER;
        size = (inst.M3.x6 & 0x3);
        if ((inst.M5.x6 >> 2) > 0xb) {          // store
            dir = IOREQ_WRITE;
            vcpu_get_gr_nat(vcpu, inst.M5.r2, &data);
            vcpu_get_gr_nat(vcpu, inst.M5.r3, &temp);
            post_update = (inst.M5.i << 7) + inst.M5.imm7;
            if (inst.M5.s)
                temp -= post_update;
            else
                temp += post_update;
            vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
        } else if ((inst.M3.x6 >> 2) < 0xb) {   // load
            dir = IOREQ_READ;
            vcpu_get_gr_nat(vcpu, inst.M3.r1, &value);
            vcpu_get_gr_nat(vcpu, inst.M3.r3, &temp);
            post_update = (inst.M3.i << 7) + inst.M3.imm7;
            if (inst.M3.s)
                temp -= post_update;
            else
                temp += post_update;
            vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
        }
    }
    // Floating-point Load/Store
    // else if (inst.M6.major == 6 && inst.M6.m == 0 && inst.M6.x == 0 && inst.M6.x6 == 3) {
    //     inst_type = SL_FLOATING; // fp
    //     dir = IOREQ_READ;
    //     size = 3; // ldfd
    // }
    else {
        printf("This memory access instruction can't be emulated: %lx\n", inst.inst);
        while (1);
    }
    size = 1 << size;   // convert the encoded size (0..3) to bytes (1..8)
    if (dir == IOREQ_WRITE) {
        mmio_access(vcpu, padr, &data, size, ma, dir);
    } else {
        mmio_access(vcpu, padr, &data, size, ma, dir);
        // Merge a sub-8-byte read into the old register value.
        if (size == 1)
            data = (value & 0xffffffffffffff00U) | (data & 0xffU);
        else if (size == 2)
            data = (value & 0xffffffffffff0000U) | (data & 0xffffU);
        else if (size == 4)
            data = (value & 0xffffffff00000000U) | (data & 0xffffffffU);

        if (inst_type == SL_INTEGER) {      // gp
            vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
        } else {
            panic("Don't support ldfd now!");
            /* switch (inst.M6.f1) {
            case 6:
                regs->f6 = (struct ia64_fpreg)data;
            case 7:
                regs->f7 = (struct ia64_fpreg)data;
            case 8:
                regs->f8 = (struct ia64_fpreg)data;
            case 9:
                regs->f9 = (struct ia64_fpreg)data;
            case 10:
                regs->f10 = (struct ia64_fpreg)data;
            case 11:
                regs->f11 = (struct ia64_fpreg)data;
            default:
                ia64_ldfs(inst.M6.f1, &data);
            }
            */
        }
    }
    vmx_vcpu_increment_iip(vcpu);
}