ia64/xen-unstable

xen/arch/ia64/vmx/mmio.c @ 16682:7515dc56c124

[IA64] Sort out the XEN_IA64_OPTF_IDENT_MAP_REG[457] constants confusion

Currently the constants are used for two different purposes:
one is the OPTF hypercall sub-command,
the other is a bit flag in struct opt_feature::mask.
These are distinct name spaces, so split them apart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Sun Dec 30 13:02:16 2007 -0700 (2007-12-30)
parents 4dd302c8d59c
children 430a036ab261
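For context, the split described above yields two independent name spaces, both of which appear in the code below: the plain XEN_IA64_OPTF_IDENT_MAP_REG[457] names are the hypercall sub-commands stored in xen_ia64_opt_feature.cmd (see vmx_identity_mappings_load), while the *_FLG names are single-bit masks tested against struct opt_feature::mask (see vmx_identity_mappings_save). A minimal illustrative sketch of the distinction follows; the numeric values are placeholders, not the definitions from the Xen public headers.

/* Illustrative sketch only -- placeholder values, not the real header definitions. */

/* Name space 1: sub-commands for the opt_feature hypercall,
 * carried in struct xen_ia64_opt_feature::cmd. */
#define XEN_IA64_OPTF_IDENT_MAP_REG4      2             /* placeholder value */
#define XEN_IA64_OPTF_IDENT_MAP_REG5      3             /* placeholder value */
#define XEN_IA64_OPTF_IDENT_MAP_REG7      4             /* placeholder value */

/* Name space 2: per-domain feature bits OR-ed into struct opt_feature::mask. */
#define XEN_IA64_OPTF_IDENT_MAP_REG4_FLG  (1UL << 2)    /* placeholder value */
#define XEN_IA64_OPTF_IDENT_MAP_REG5_FLG  (1UL << 3)    /* placeholder value */
#define XEN_IA64_OPTF_IDENT_MAP_REG7_FLG  (1UL << 4)    /* placeholder value */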
line source
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * mmio.c: MMIO emulation components.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
 */

#include <linux/sched.h>
#include <xen/mm.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/bundle.h>
#include <asm/types.h>
#include <public/hvm/ioreq.h>
#include <asm/vmx.h>
#include <public/event_channel.h>
#include <public/xen.h>
#include <linux/event.h>
#include <xen/domain.h>
#include <asm/viosapic.h>
#include <asm/vlsapic.h>
#include <asm/hvm/vacpi.h>
#include <asm/hvm/support.h>
#include <public/hvm/save.h>

#define HVM_BUFFERED_IO_RANGE_NR 1

struct hvm_buffered_io_range {
    unsigned long start_addr;
    unsigned long length;
};

static struct hvm_buffered_io_range buffered_stdvga_range = {0xA0000, 0x20000};
static struct hvm_buffered_io_range
*hvm_buffered_io_ranges[HVM_BUFFERED_IO_RANGE_NR] =
{
    &buffered_stdvga_range
};

static int hvm_buffered_io_intercept(ioreq_t *p)
{
    struct vcpu *v = current;
    buffered_iopage_t *pg =
        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buf_ioreq.va);
    buf_ioreq_t bp;
    int i, qw = 0;

    /* Ensure buffered_iopage fits in a page */
    BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);

    /* ignore READ ioreq_t and anything buffered io can't deal with */
    if (p->dir == IOREQ_READ || p->addr > 0xFFFFFUL ||
        p->data_is_ptr || p->count != 1)
        return 0;

    for (i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++) {
        if (p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
            p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
                                    hvm_buffered_io_ranges[i]->length)
            break;
    }

    if (i == HVM_BUFFERED_IO_RANGE_NR)
        return 0;

    bp.type = p->type;
    bp.dir = p->dir;
    switch (p->size) {
    case 1:
        bp.size = 0;
        break;
    case 2:
        bp.size = 1;
        break;
    case 4:
        bp.size = 2;
        break;
    case 8:
        bp.size = 3;
        qw = 1;
        break;
    default:
        gdprintk(XENLOG_WARNING, "unexpected ioreq size:%"PRId64"\n", p->size);
        return 0;
    }
    bp.data = p->data;
    bp.addr = p->addr;

    spin_lock(&v->domain->arch.hvm_domain.buf_ioreq.lock);

    if (pg->write_pointer - pg->read_pointer >= IOREQ_BUFFER_SLOT_NUM - qw) {
        /* The queue is full.
         * Send the iopacket through the normal path.
         * NOTE: the arithmetic above remains correct even if
         * write_pointer overflows.
         */
        spin_unlock(&v->domain->arch.hvm_domain.buf_ioreq.lock);
        return 0;
    }

    memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
           &bp, sizeof(bp));

    if (qw) {
        bp.data = p->data >> 32;
        memcpy(&pg->buf_ioreq[(pg->write_pointer + 1) % IOREQ_BUFFER_SLOT_NUM],
               &bp, sizeof(bp));
    }

    /* Make the ioreq_t visible before write_pointer */
    wmb();
    pg->write_pointer += qw ? 2 : 1;

    spin_unlock(&v->domain->arch.hvm_domain.buf_ioreq.lock);

    return 1;
}

static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_vio(v);
    if (!vio)
        panic_domain(NULL, "bad shared page");

    p = &vio->vp_ioreq;

    p->addr = pa;
    p->size = s;
    p->count = 1;
    if (dir == IOREQ_WRITE)
        p->data = *val;
    else
        p->data = 0;
    p->data_is_ptr = 0;
    p->dir = dir;
    p->df = 0;
    p->type = 1;

    p->io_count++;

    if (hvm_buffered_io_intercept(p)) {
        p->state = STATE_IORESP_READY;
        vmx_io_assist(v);
        if (dir != IOREQ_READ)
            return;
    }

    vmx_send_assist_req(v);
    if (dir == IOREQ_READ)
        *val = p->data;

    return;
}

static int vmx_ide_pio_intercept(ioreq_t *p, u64 *val)
{
    struct buffered_piopage *pio_page =
        (void *)(current->domain->arch.hvm_domain.buf_pioreq.va);
    spinlock_t *pio_lock;
    struct pio_buffer *piobuf;
    uint32_t pointer, page_offset;

    if (p->addr == 0x1F0)
        piobuf = &pio_page->pio[PIO_BUFFER_IDE_PRIMARY];
    else if (p->addr == 0x170)
        piobuf = &pio_page->pio[PIO_BUFFER_IDE_SECONDARY];
    else
        return 0;

    if (p->size != 2 && p->size != 4)
        return 0;

    pio_lock = &current->domain->arch.hvm_domain.buf_pioreq.lock;
    spin_lock(pio_lock);

    pointer = piobuf->pointer;
    page_offset = piobuf->page_offset;

    /* sanity check */
    if (page_offset + pointer < offsetof(struct buffered_piopage, buffer))
        goto unlock_out;
    if (page_offset + piobuf->data_end > PAGE_SIZE)
        goto unlock_out;

    if (pointer + p->size < piobuf->data_end) {
        uint8_t *bufp = (uint8_t *)pio_page + page_offset + pointer;
        if (p->dir == IOREQ_WRITE) {
            if (likely(p->size == 4 && (((long)bufp & 3) == 0)))
                *(uint32_t *)bufp = *val;
            else
                memcpy(bufp, val, p->size);
        } else {
            if (likely(p->size == 4 && (((long)bufp & 3) == 0))) {
                *val = *(uint32_t *)bufp;
            } else {
                *val = 0;
                memcpy(val, bufp, p->size);
            }
        }
        piobuf->pointer += p->size;
        spin_unlock(pio_lock);

        p->state = STATE_IORESP_READY;
        vmx_io_assist(current);
        return 1;
    }

 unlock_out:
    spin_unlock(pio_lock);
    return 0;
}

#define TO_LEGACY_IO(pa) (((pa)>>12<<2)|((pa)&0x3))

static void __vmx_identity_mapping_save(int on,
                                        const struct identity_mapping* im,
                                        struct hvm_hw_ia64_identity_mapping *im_save)
{
    im_save->on = !!on;
    if (!on) {
        im_save->pgprot = 0;
        im_save->key = 0;
    } else {
        im_save->pgprot = im->pgprot;
        im_save->key = im->key;
    }
}

static int vmx_identity_mappings_save(struct domain *d,
                                      hvm_domain_context_t *h)
{
    const struct opt_feature *optf = &d->arch.opt_feature;
    struct hvm_hw_ia64_identity_mappings im_save;

    __vmx_identity_mapping_save(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4_FLG,
                                &optf->im_reg4, &im_save.im_reg4);
    __vmx_identity_mapping_save(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5_FLG,
                                &optf->im_reg5, &im_save.im_reg5);
    __vmx_identity_mapping_save(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG7_FLG,
                                &optf->im_reg7, &im_save.im_reg7);

    return hvm_save_entry(OPT_FEATURE_IDENTITY_MAPPINGS, 0, h, &im_save);
}

static int __vmx_identity_mapping_load(struct domain *d, unsigned long cmd,
                                       const struct hvm_hw_ia64_identity_mapping *im_load)
{
    struct xen_ia64_opt_feature optf;

    optf.cmd = cmd;
    optf.on = im_load->on;
    optf.pgprot = im_load->pgprot;
    optf.key = im_load->key;

    return domain_opt_feature(d, &optf);
}

static int vmx_identity_mappings_load(struct domain *d,
                                      hvm_domain_context_t *h)
{
    struct hvm_hw_ia64_identity_mappings im_load;
    int rc;

    if (hvm_load_entry(OPT_FEATURE_IDENTITY_MAPPINGS, h, &im_load))
        return -EINVAL;

    rc = __vmx_identity_mapping_load(d, XEN_IA64_OPTF_IDENT_MAP_REG4,
                                     &im_load.im_reg4);
    if (rc)
        return rc;
    rc = __vmx_identity_mapping_load(d, XEN_IA64_OPTF_IDENT_MAP_REG5,
                                     &im_load.im_reg5);
    if (rc)
        return rc;
    rc = __vmx_identity_mapping_load(d, XEN_IA64_OPTF_IDENT_MAP_REG7,
                                     &im_load.im_reg7);

    return rc;
}

HVM_REGISTER_SAVE_RESTORE(OPT_FEATURE_IDENTITY_MAPPINGS,
                          vmx_identity_mappings_save,
                          vmx_identity_mappings_load,
                          1, HVMSR_PER_DOM);

static void legacy_io_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_vio(v);
    if (!vio)
        panic_domain(NULL, "bad shared page\n");

    p = &vio->vp_ioreq;
    p->addr = TO_LEGACY_IO(pa & 0x3ffffffUL);
    p->size = s;
    p->count = 1;
    p->dir = dir;
    if (dir == IOREQ_WRITE)
        p->data = *val;
    else
        p->data = 0;
    p->data_is_ptr = 0;
    p->type = 0;
    p->df = 0;

    p->io_count++;

    if (vmx_ide_pio_intercept(p, val))
        return;

    if (IS_ACPI_ADDR(p->addr) && vacpi_intercept(p, val))
        return;

    vmx_send_assist_req(v);
    if (dir == IOREQ_READ) { // read
        *val = p->data;
    }
#ifdef DEBUG_PCI
    if (dir == IOREQ_WRITE)
        if (p->addr == 0xcf8UL)
            printk("Write 0xcf8, with val [0x%lx]\n", p->data);
    else
        if (p->addr == 0xcfcUL)
            printk("Read 0xcfc, with val [0x%lx]\n", p->data);
#endif //DEBUG_PCI
    return;
}

static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int dir)
{
    unsigned long iot;
    iot = __gpfn_is_io(vcpu->domain, src_pa >> PAGE_SHIFT);

    perfc_incra(vmx_mmio_access, iot >> 56);
    switch (iot) {
    case GPFN_PIB:
        if (ma != 4)
            panic_domain(NULL, "Access PIB not with UC attribute\n");

        if (!dir)
            vlsapic_write(vcpu, src_pa, s, *dest);
        else
            *dest = vlsapic_read(vcpu, src_pa, s);
        break;
    case GPFN_GFW:
        break;
    case GPFN_IOSAPIC:
        if (!dir)
            viosapic_write(vcpu, src_pa, s, *dest);
        else
            *dest = viosapic_read(vcpu, src_pa, s);
        break;
    case GPFN_FRAME_BUFFER:
    case GPFN_LOW_MMIO:
        low_mmio_access(vcpu, src_pa, dest, s, dir);
        break;
    case GPFN_LEGACY_IO:
        legacy_io_access(vcpu, src_pa, dest, s, dir);
        break;
    default:
        panic_domain(NULL, "Bad I/O access\n");
        break;
    }
    return;
}

/* dir 1: read, 0: write */
void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
{
    REGS *regs;
    IA64_BUNDLE bundle;
    int slot, dir = 0;
    enum { SL_INTEGER, SL_FLOATING, SL_FLOATING_FP8 } inst_type;
    size_t size;
    u64 data, data1, temp, update_reg;
    s32 imm;
    INST64 inst;

    regs = vcpu_regs(vcpu);
    if (IA64_RETRY == __vmx_get_domain_bundle(regs->cr_iip, &bundle)) {
        /* if the instruction fetch fails, return and retry */
        return;
    }
    slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
    if (!slot)
        inst.inst = bundle.slot0;
    else if (slot == 1) {
        u64 slot1b = bundle.slot1b;
        inst.inst = bundle.slot1a + (slot1b << 18);
    }
    else if (slot == 2)
        inst.inst = bundle.slot2;

    // Integer Load/Store
    if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
        inst_type = SL_INTEGER;
        size = (inst.M1.x6 & 0x3);
        if ((inst.M1.x6 >> 2) > 0xb) {
            dir = IOREQ_WRITE;
            vcpu_get_gr_nat(vcpu, inst.M4.r2, &data);
        } else if ((inst.M1.x6 >> 2) < 0xb) {
            dir = IOREQ_READ;
        }
    }
    // Integer Load + Reg update
    else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
        inst_type = SL_INTEGER;
        dir = IOREQ_READ;
        size = (inst.M2.x6 & 0x3);
        vcpu_get_gr_nat(vcpu, inst.M2.r3, &temp);
        vcpu_get_gr_nat(vcpu, inst.M2.r2, &update_reg);
        temp += update_reg;
        vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
    }
    // Integer Load/Store + Imm update
    else if (inst.M3.major == 5) {
        inst_type = SL_INTEGER;
        size = (inst.M3.x6 & 0x3);
        if ((inst.M5.x6 >> 2) > 0xb) {
            dir = IOREQ_WRITE;
            vcpu_get_gr_nat(vcpu, inst.M5.r2, &data);
            vcpu_get_gr_nat(vcpu, inst.M5.r3, &temp);
            imm = (inst.M5.s << 31) | (inst.M5.i << 30) | (inst.M5.imm7 << 23);
            temp += imm >> 23;
            vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
        } else if ((inst.M3.x6 >> 2) < 0xb) {
            dir = IOREQ_READ;
            vcpu_get_gr_nat(vcpu, inst.M3.r3, &temp);
            imm = (inst.M3.s << 31) | (inst.M3.i << 30) | (inst.M3.imm7 << 23);
            temp += imm >> 23;
            vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
        }
    }
    // Floating-point spill
    else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B &&
             inst.M9.m == 0 && inst.M9.x == 0) {
        struct ia64_fpreg v;

        inst_type = SL_FLOATING;
        dir = IOREQ_WRITE;
        vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
        data1 = v.u.bits[1] & 0x3ffff;
        data = v.u.bits[0];
        size = 4;
    }
    // Floating-point spill + Imm update
    else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
        struct ia64_fpreg v;

        inst_type = SL_FLOATING;
        dir = IOREQ_WRITE;
        vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
        vcpu_get_gr_nat(vcpu, inst.M10.r3, &temp);
        imm = (inst.M10.s << 31) | (inst.M10.i << 30) | (inst.M10.imm7 << 23);
        temp += imm >> 23;
        vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
        data1 = v.u.bits[1] & 0x3ffff;
        data = v.u.bits[0];
        size = 4;
    }
    // Floating-point stf8 + Imm update
    else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
        struct ia64_fpreg v;

        inst_type = SL_FLOATING;
        dir = IOREQ_WRITE;
        size = 3;
        vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
        data = v.u.bits[0]; /* Significand. */
        vcpu_get_gr_nat(vcpu, inst.M10.r3, &temp);
        imm = (inst.M10.s << 31) | (inst.M10.i << 30) | (inst.M10.imm7 << 23);
        temp += imm >> 23;
        vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
    }
    // lfetch - do not perform accesses.
    else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c && inst.M15.x6 <= 0x2f) {
        vcpu_get_gr_nat(vcpu, inst.M15.r3, &temp);
        imm = (inst.M15.s << 31) | (inst.M15.i << 30) | (inst.M15.imm7 << 23);
        temp += imm >> 23;
        vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);

        vcpu_increment_iip(vcpu);
        return;
    }
    // Floating-point Load Pair + Imm ldfp8 M12
    else if (inst.M12.major == 6 && inst.M12.m == 1
             && inst.M12.x == 1 && inst.M12.x6 == 1) {
        inst_type = SL_FLOATING_FP8;
        dir = IOREQ_READ;
        size = 4; //ldfd
        vcpu_set_gr(vcpu, inst.M12.r3, padr + 16, 0);
    }
    else {
        panic_domain(NULL,
                     "This memory access instr can't be emulated: %lx pc=%lx\n",
                     inst.inst, regs->cr_iip);
    }

    if (size == 4) {
        mmio_access(vcpu, padr + 8, &data1, 1 << 3, ma, dir);
        size = 3;
    }
    mmio_access(vcpu, padr, &data, 1 << size, ma, dir);

    if (dir == IOREQ_READ) {
        if (inst_type == SL_INTEGER) {
            vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
        } else if (inst_type == SL_FLOATING_FP8) {
            struct ia64_fpreg v;

            v.u.bits[0] = data;
            v.u.bits[1] = 0x1003E;
            vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
            v.u.bits[0] = data1;
            v.u.bits[1] = 0x1003E;
            vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
        } else {
            panic_domain(NULL, "Don't support ldfd now !");
        }
    }
    vcpu_increment_iip(vcpu);
}