ia64/xen-unstable

view xen/arch/ia64/vmx/mmio.c @ 9507:67b24fc635ae

[IA64] warning fix

Fixed some compilation warnings

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Tue Apr 11 13:54:58 2006 -0600 (2006-04-11)
parents 8a551ec13d93
children 042b695ffc69
line source
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * mmio.c: MMIO emulation components.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 * Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
 */
#include <linux/sched.h>
#include <asm/tlb.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/privop.h>
#include <asm/types.h>
#include <public/hvm/ioreq.h>
#include <asm/mm.h>
#include <asm/vmx.h>
#include <public/event_channel.h>
#include <linux/event.h>
/*
struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
{
    int i;
    for (i = 0; mio_base[i].iot != NOT_IO; i++) {
        if (gpa >= mio_base[i].start && gpa <= mio_base[i].end)
            return &mio_base[i];
    }
    return NULL;
}
*/
#define PIB_LOW_HALF(ofst) (!((ofst) & (1 << 20)))
#define PIB_OFST_INTA 0x1E0000
#define PIB_OFST_XTP  0x1E0008
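/*
 * Layout of the guest PIB (processor interrupt block): offsets with
 * bit 20 clear fall in the lower-half IPI delivery area, while the
 * upper half carries the INTA byte (0x1E0000) and the XTP byte
 * (0x1E0008). pib_write()/pib_read() below dispatch on these offsets.
 */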
static int write_ipi(VCPU *vcpu, uint64_t addr, uint64_t value);
static void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
{
    switch (pib_off) {
    case PIB_OFST_INTA:
        panic("Undefined write on PIB INTA\n");
        break;
    case PIB_OFST_XTP:
        if (s == 1 && ma == 4 /* UC */) {
            vmx_vcpu_get_plat(vcpu)->xtp = *(uint8_t *)src;
        }
        else {
            panic("Undefined write on PIB XTP\n");
        }
        break;
    default:
        if (PIB_LOW_HALF(pib_off)) {    // lower half
            if (s != 8 || ma != 0x4 /* UC */) {
                panic("Undefined IPI-LHF write with s %ld, ma %d!\n", s, ma);
            }
            else {
                write_ipi(vcpu, pib_off, *(uint64_t *)src);
                // TODO for SM-VP
            }
        }
        else {                          // upper half
            printf("IPI-UHF write %lx\n", pib_off);
            panic("Not supported yet for SM-VP\n");
        }
        break;
    }
}
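/*
 * Loads from the PIB: only the XTP byte returns real state; INTA and
 * IPI-space reads are stubbed out until SM-VP support arrives.
 */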
static void pib_read(VCPU *vcpu, uint64_t pib_off, void *dest, size_t s, int ma)
{
    switch (pib_off) {
    case PIB_OFST_INTA:
        // todo --- emit on processor system bus.
        if (s == 1 && ma == 4) {    // 1 byte load
            // TODO: INTA read from IOSAPIC
        }
        else {
            panic("Undefined read on PIB INTA\n");
        }
        break;
    case PIB_OFST_XTP:
        if (s == 1 && ma == 4) {
            *((uint8_t *)dest) = vmx_vcpu_get_plat(vcpu)->xtp;
        }
        else {
            panic("Undefined read on PIB XTP\n");
        }
        break;
    default:
        if (PIB_LOW_HALF(pib_off)) {    // lower half
            if (s != 8 || ma != 4) {
                panic("Undefined IPI-LHF read!\n");
            }
            else {
#ifdef IPI_DEBUG
                printf("IPI-LHF read %lx\n", pib_off);
#endif
                *(uint64_t *)dest = 0;  // TODO for SM-VP
            }
        }
        else {                          // upper half
            if (s != 1 || ma != 4) {
                panic("Undefined PIB-UHF read!\n");
            }
            else {
#ifdef IPI_DEBUG
                printf("IPI-UHF read %lx\n", pib_off);
#endif
                *(uint8_t *)dest = 0;   // TODO for SM-VP
            }
        }
        break;
    }
}
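/*
 * Hand an I/O request to the external device model through the shared
 * ioreq page: fill in vp_ioreq, mark it STATE_IOREQ_READY, notify the
 * device model over its event channel, and block in vmx_wait_io()
 * until it completes. For reads, the result comes back in p->u.data.
 */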
static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_vio(v->domain, v->vcpu_id);
    if (vio == NULL) {
        panic("bad shared page: %lx", (unsigned long)vio);
    }
    p = &vio->vp_ioreq;
    p->addr = pa;
    p->size = s;
    p->count = 1;
    p->dir = dir;
    if (dir == IOREQ_WRITE)     // write
        p->u.data = *val;
    p->pdata_valid = 0;
    p->type = 1;                // MMIO request (legacy port I/O uses 0)
    p->df = 0;

    set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    p->state = STATE_IOREQ_READY;
    evtchn_send(iopacket_port(v));
    vmx_wait_io();
    if (dir == IOREQ_READ) {    // read
        *val = p->u.data;
    }
    return;
}
#define TO_LEGACY_IO(pa) (((pa) >> 12 << 2) | ((pa) & 0x3))
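/*
 * Each 4KB page of the legacy I/O window carries four port addresses,
 * so the port number is ((pa >> 12) << 2) | (pa & 3). For example, a
 * (hypothetical) guest access at offset 0x33e000 into the window maps
 * to port 0xcf8, since 0x33e << 2 == 0xcf8.
 */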
static void legacy_io_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_vio(v->domain, v->vcpu_id);
    if (vio == NULL) {
        panic("bad shared page\n");
    }
    p = &vio->vp_ioreq;
    p->addr = TO_LEGACY_IO(pa & 0x3ffffffUL);
    p->size = s;
    p->count = 1;
    p->dir = dir;
    if (dir == IOREQ_WRITE)     // write
        p->u.data = *val;
    p->pdata_valid = 0;
    p->type = 0;                // legacy port I/O request
    p->df = 0;

    set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    p->state = STATE_IOREQ_READY;
    evtchn_send(iopacket_port(v));

    vmx_wait_io();
    if (dir == IOREQ_READ) {    // read
        *val = p->u.data;
    }
#ifdef DEBUG_PCI
    /* Braces matter here: without them the `else` pairs with the inner
       `if`, and the read message fires on non-0xcf8 writes instead. */
    if (dir == IOREQ_WRITE) {
        if (p->addr == 0xcf8UL)
            printk("Write 0xcf8, with val [0x%lx]\n", p->u.data);
    } else {
        if (p->addr == 0xcfcUL)
            printk("Read 0xcfc, with val [0x%lx]\n", p->u.data);
    }
#endif // DEBUG_PCI
    return;
}
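/*
 * Top-level dispatcher: __gpfn_is_io() classifies the guest frame, and
 * the access is routed to the PIB, the virtual IOSAPIC, the device
 * model (low MMIO / frame buffer), or legacy port I/O accordingly.
 */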
extern struct vmx_mmio_handler vioapic_mmio_handler;
static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int dir)
{
    struct virtual_platform_def *v_plat;
    unsigned long iot;
    struct vmx_mmio_handler *vioapic_handler = &vioapic_mmio_handler;

    iot = __gpfn_is_io(vcpu->domain, src_pa >> PAGE_SHIFT);
    v_plat = vmx_vcpu_get_plat(vcpu);

    switch (iot) {
    case GPFN_PIB:
        if (dir == IOREQ_WRITE)
            pib_write(vcpu, dest, src_pa - v_plat->pib_base, s, ma);
        else
            pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
        break;
    case GPFN_GFW:
        break;
    case GPFN_IOSAPIC:
        if (dir == IOREQ_WRITE)
            vioapic_handler->write_handler(vcpu, src_pa, s, *dest);
        else
            *dest = vioapic_handler->read_handler(vcpu, src_pa, s);
        break;
    case GPFN_FRAME_BUFFER:
    case GPFN_LOW_MMIO:
        low_mmio_access(vcpu, src_pa, dest, s, dir);
        break;
    case GPFN_LEGACY_IO:
        legacy_io_access(vcpu, src_pa, dest, s, dir);
        break;
    default:
        panic("Bad I/O access\n");
        break;
    }
    return;
}
/*
 * Read or write data in guest virtual address mode.
 */
/*
void
memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s)
{
    uint64_t pa;

    if (!vtlb->nomap)
        panic("Normal memory write shouldn't go to this point!");
    pa = PPN_2_PA(vtlb->ppn);
    pa += POFFSET((u64)dest, vtlb->ps);
    mmio_write(vcpu, src, pa, s, vtlb->ma);
}

void
memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s)
{
    uint64_t pa = (uint64_t)dest;
    int ma;

    if (pa & (1UL << 63)) {
        // UC
        ma = 4;
        pa <<= 1;
        pa >>= 1;
    }
    else {
        // WBL
        ma = 0;  // using WB for WBL
    }
    mmio_write(vcpu, src, pa, s, ma);
}

void
memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s)
{
    uint64_t pa;

    if (!vtlb->nomap)
        panic("Normal memory read shouldn't go to this point!");
    pa = PPN_2_PA(vtlb->ppn);
    pa += POFFSET((u64)src, vtlb->ps);
    mmio_read(vcpu, pa, dest, s, vtlb->ma);
}

void
memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s)
{
    uint64_t pa = (uint64_t)src;
    int ma;

    if (pa & (1UL << 63)) {
        // UC
        ma = 4;
        pa <<= 1;
        pa >>= 1;
    }
    else {
        // WBL
        ma = 0;  // using WB for WBL
    }
    mmio_read(vcpu, pa, dest, s, ma);
}
*/
/*
 * Deliver an IPI message. (Only U-VP is supported now.)
 *  dm:     delivery mode (SAPIC encoding).
 *  vector: interrupt vector to deliver.
 */
static void deliver_ipi(VCPU *vcpu, uint64_t dm, uint64_t vector)
{
#ifdef IPI_DEBUG
    printf("deliver_ipi %lx %lx\n", dm, vector);
#endif
    switch (dm) {
    case 0:     // INT
        vmx_vcpu_pend_interrupt(vcpu, vector);
        break;
    case 2:     // PMI
        // TODO -- inject guest PMI
        panic("Inject guest PMI!\n");
        break;
    case 4:     // NMI
        vmx_vcpu_pend_interrupt(vcpu, 2);
        break;
    case 5:     // INIT
        // TODO -- inject guest INIT
        panic("Inject guest INIT!\n");
        break;
    case 7:     // ExtINT
        vmx_vcpu_pend_interrupt(vcpu, 0);
        break;
    case 1:
    case 3:
    case 6:
    default:
        panic("Deliver reserved IPI!\n");
        break;
    }
}
/*
 * TODO: Use hash table for the lookup.
 */
static inline VCPU *lid_2_vcpu(struct domain *d, u64 id, u64 eid)
{
    int i;
    VCPU *vcpu;
    LID lid;

    for (i = 0; i < MAX_VIRT_CPUS; i++) {
        vcpu = d->vcpu[i];
        if (!vcpu)
            continue;
        lid.val = VCPU(vcpu, lid);
        if (lid.id == id && lid.eid == eid) {
            return vcpu;
        }
    }
    return NULL;
}
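/*
 * An IPI is an 8-byte UC store into the low half of the PIB: the store
 * address encodes the target processor (id/eid, decoded via ipi_a_t)
 * and the stored value encodes the delivery mode and vector (decoded
 * via ipi_d_t), mirroring the SAPIC inter-processor interrupt message.
 */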
/*
 * Execute a write-IPI op.
 */
static int write_ipi(VCPU *vcpu, uint64_t addr, uint64_t value)
{
    VCPU *target_cpu;

    target_cpu = lid_2_vcpu(vcpu->domain,
                            ((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
    if (target_cpu == NULL)
        panic("Unknown IPI cpu\n");
    if (target_cpu == vcpu) {
        // IPI to self
        deliver_ipi(vcpu, ((ipi_d_t)value).dm,
                    ((ipi_d_t)value).vector);
        return 1;
    }
    else {
        // TODO: send host IPI to inject guest SMP IPI interruption
        panic("No SM-VP supported!\n");
        return 0;
    }
}
/*
 * dir: 1 = read, 0 = write
 * inst_type: 0 = integer, 1 = floating point
 */
extern IA64_BUNDLE __vmx_get_domain_bundle(u64 iip);
#define SL_INTEGER  0   // store/load integer
#define SL_FLOATING 1   // store/load floating
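/*
 * Emulate the memory-access instruction that faulted on an I/O address:
 * fetch the bundle at the guest iip, select the slot named by psr.ri,
 * then decode the M-format fields for direction, access size (log2
 * bytes in x6 bits 0..1), target/source register, and any base-register
 * post-update the instruction performs.
 */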
void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
{
    REGS *regs;
    IA64_BUNDLE bundle;
    int slot, dir = 0, inst_type;
    size_t size;
    u64 data, value, post_update, slot1a, slot1b, temp;
    INST64 inst;

    regs = vcpu_regs(vcpu);
    bundle = __vmx_get_domain_bundle(regs->cr_iip);
    slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
    if (!slot)
        inst.inst = bundle.slot0;
    else if (slot == 1) {
        slot1a = bundle.slot1a;
        slot1b = bundle.slot1b;
        inst.inst = slot1a + (slot1b << 18);
    }
    else if (slot == 2)
        inst.inst = bundle.slot2;

    // Integer Load/Store
    if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
        inst_type = SL_INTEGER;
        size = (inst.M1.x6 & 0x3);
        if ((inst.M1.x6 >> 2) > 0xb) {          // write
            dir = IOREQ_WRITE;
            vcpu_get_gr_nat(vcpu, inst.M4.r2, &data);
        } else if ((inst.M1.x6 >> 2) < 0xb) {   // read
            dir = IOREQ_READ;
            vcpu_get_gr_nat(vcpu, inst.M1.r1, &value);
        }
    }
    // Integer Load + Reg update
    else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
        inst_type = SL_INTEGER;
        dir = IOREQ_READ;
        size = (inst.M2.x6 & 0x3);
        vcpu_get_gr_nat(vcpu, inst.M2.r1, &value);
        vcpu_get_gr_nat(vcpu, inst.M2.r3, &temp);
        vcpu_get_gr_nat(vcpu, inst.M2.r2, &post_update);
        temp += post_update;
        vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
    }
    // Integer Load/Store + Imm update
    else if (inst.M3.major == 5) {
        inst_type = SL_INTEGER;
        size = (inst.M3.x6 & 0x3);
        if ((inst.M5.x6 >> 2) > 0xb) {          // write
            dir = IOREQ_WRITE;
            vcpu_get_gr_nat(vcpu, inst.M5.r2, &data);
            vcpu_get_gr_nat(vcpu, inst.M5.r3, &temp);
            post_update = (inst.M5.i << 7) + inst.M5.imm7;
            if (inst.M5.s)
                temp -= post_update;
            else
                temp += post_update;
            vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
        } else if ((inst.M3.x6 >> 2) < 0xb) {   // read
            dir = IOREQ_READ;
            vcpu_get_gr_nat(vcpu, inst.M3.r1, &value);
            vcpu_get_gr_nat(vcpu, inst.M3.r3, &temp);
            post_update = (inst.M3.i << 7) + inst.M3.imm7;
            if (inst.M3.s)
                temp -= post_update;
            else
                temp += post_update;
            vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
        }
    }
    // Floating-point Load/Store
    // else if (inst.M6.major == 6 && inst.M6.m == 0 && inst.M6.x == 0 && inst.M6.x6 == 3) {
    //     inst_type = SL_FLOATING;  // fp
    //     dir = IOREQ_READ;
    //     size = 3;  // ldfd
    // }
    else {
        printf("This memory access instruction can't be emulated: %lx\n",
               inst.inst);
        while (1);
    }
    size = 1 << size;   // convert the log2 encoding in x6 to bytes
    if (dir == IOREQ_WRITE) {
        mmio_access(vcpu, padr, &data, size, ma, dir);
    } else {
        mmio_access(vcpu, padr, &data, size, ma, dir);
        /* Merge a sub-8-byte result into the old destination value;
           size is now in bytes, so compare against 1/2/4 here. */
        if (size == 1)
            data = (value & 0xffffffffffffff00U) | (data & 0xffU);
        else if (size == 2)
            data = (value & 0xffffffffffff0000U) | (data & 0xffffU);
        else if (size == 4)
            data = (value & 0xffffffff00000000U) | (data & 0xffffffffU);

        if (inst_type == SL_INTEGER) {  // gp
            vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
        } else {
            panic("Don't support ldfd now!");
/*          switch (inst.M6.f1) {
            case 6:
                regs->f6 = (struct ia64_fpreg)data;
            case 7:
                regs->f7 = (struct ia64_fpreg)data;
            case 8:
                regs->f8 = (struct ia64_fpreg)data;
            case 9:
                regs->f9 = (struct ia64_fpreg)data;
            case 10:
                regs->f10 = (struct ia64_fpreg)data;
            case 11:
                regs->f11 = (struct ia64_fpreg)data;
            default:
                ia64_ldfs(inst.M6.f1, &data);
            }
*/
        }
    }
    vmx_vcpu_increment_iip(vcpu);
}