ia64/xen-unstable

view xen/arch/ia64/xen/fw_emul.c @ 12668:49ab12a04d16

[IA64] Make do_ssc() not fall into an infinite loop

Instead, call panic_domain().

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Thu Nov 30 15:58:21 2006 -0700 (2006-11-30)
parents fa0f2a90059f
children 1502ba048b73
line source
1 /*
2 * fw_emul.c:
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 */
18 #include <xen/config.h>
19 #include <asm/system.h>
20 #include <asm/pgalloc.h>
22 #include <linux/efi.h>
23 #include <asm/pal.h>
24 #include <asm/sal.h>
25 #include <asm/xenmca.h>
27 #include <public/sched.h>
28 #include "hpsim_ssc.h"
29 #include <asm/vcpu.h>
30 #include <asm/vmx_vcpu.h>
31 #include <asm/dom_fw.h>
32 #include <asm/uaccess.h>
33 #include <xen/console.h>
34 #include <xen/hypercall.h>
35 #include <xen/softirq.h>
/* Serializes calls into the EFI time services, which are not reentrant. */
static DEFINE_SPINLOCK(efi_time_services_lock);

extern unsigned long running_on_sim;

/*
 * Shadow copy of the machine-check parameters a guest registers via
 * SAL_MC_SET_PARAMS, indexed by parameter type (0 .. SAL_MC_PARAM_CPE_INT).
 */
struct sal_mc_params {
	u64 param_type;
	u64 i_or_m;
	u64 i_or_m_val;
	u64 timeout;
	u64 rz_always;
} sal_mc_params[SAL_MC_PARAM_CPE_INT + 1];

/*
 * Shadow copy of the OS entry points a guest registers via
 * SAL_SET_VECTORS, indexed by vector type (0 .. SAL_VECTOR_OS_BOOT_RENDEZ).
 */
struct sal_vectors {
	u64 vector_type;
	u64 handler_addr1;
	u64 gp1;
	u64 handler_len1;
	u64 handler_addr2;
	u64 gp2;
	u64 handler_len2;
} sal_vectors[SAL_VECTOR_OS_BOOT_RENDEZ + 1];

/*
 * Argument bundle handed to get_state_info_on()/clear_state_info_on(),
 * possibly on a remote CPU via smp_call_function_single(); results are
 * reported back through ret/status.
 */
struct smp_call_args_t {
	u64 type;
	u64 ret;
	u64 target;
	struct domain *domain;
	int corrected;
	int status;
	void *data;
};

extern sal_log_record_header_t *sal_record;
/* Protects the (single, shared) sal_record buffer declared above. */
DEFINE_SPINLOCK(sal_record_lock);

extern spinlock_t sal_queue_lock;

/* SAL status code: no error record of the requested type is available. */
#define IA64_SAL_NO_INFORMATION_AVAILABLE -5

#if defined(IA64_SAL_DEBUG_INFO)
/* Printable names for SAL_INFO_TYPE_* record types, for debug output. */
static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };

# define IA64_SAL_DEBUG(fmt...) printk("sal_emulator: " fmt)
#else
# define IA64_SAL_DEBUG(fmt...)
#endif
84 void get_state_info_on(void *data) {
85 struct smp_call_args_t *arg = data;
86 int flags;
88 spin_lock_irqsave(&sal_record_lock, flags);
89 memset(sal_record, 0, ia64_sal_get_state_info_size(arg->type));
90 arg->ret = ia64_sal_get_state_info(arg->type, (u64 *)sal_record);
91 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) on CPU#%d returns %ld.\n",
92 rec_name[arg->type], smp_processor_id(), arg->ret);
93 if (arg->corrected) {
94 sal_record->severity = sal_log_severity_corrected;
95 IA64_SAL_DEBUG("%s: IA64_SAL_CLEAR_STATE_INFO(SAL_INFO_TYPE_MCA)"
96 " force\n", __FUNCTION__);
97 }
98 if (arg->ret > 0) {
99 /*
100 * Save current->domain and set to local(caller) domain for
101 * xencomm_paddr_to_maddr() which calculates maddr from
102 * paddr using mpa value of current->domain.
103 */
104 struct domain *save;
105 save = current->domain;
106 current->domain = arg->domain;
107 if (xencomm_copy_to_guest((void*)arg->target,
108 sal_record, arg->ret, 0)) {
109 printk("SAL_GET_STATE_INFO can't copy to user!!!!\n");
110 arg->status = IA64_SAL_NO_INFORMATION_AVAILABLE;
111 arg->ret = 0;
112 }
113 /* Restore current->domain to saved value. */
114 current->domain = save;
115 }
116 spin_unlock_irqrestore(&sal_record_lock, flags);
117 }
119 void clear_state_info_on(void *data) {
120 struct smp_call_args_t *arg = data;
122 arg->ret = ia64_sal_clear_state_info(arg->type);
123 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) on CPU#%d returns %ld.\n",
124 rec_name[arg->type], smp_processor_id(), arg->ret);
126 }
128 struct sal_ret_values
129 sal_emulator (long index, unsigned long in1, unsigned long in2,
130 unsigned long in3, unsigned long in4, unsigned long in5,
131 unsigned long in6, unsigned long in7)
132 {
133 unsigned long r9 = 0;
134 unsigned long r10 = 0;
135 long r11 = 0;
136 long status;
138 status = 0;
139 switch (index) {
140 case SAL_FREQ_BASE:
141 if (!running_on_sim)
142 status = ia64_sal_freq_base(in1,&r9,&r10);
143 else switch (in1) {
144 case SAL_FREQ_BASE_PLATFORM:
145 r9 = 200000000;
146 break;
148 case SAL_FREQ_BASE_INTERVAL_TIMER:
149 r9 = 700000000;
150 break;
152 case SAL_FREQ_BASE_REALTIME_CLOCK:
153 r9 = 1;
154 break;
156 default:
157 status = -1;
158 break;
159 }
160 break;
161 case SAL_PCI_CONFIG_READ:
162 if (current->domain == dom0) {
163 u64 value;
164 // note that args 2&3 are swapped!!
165 status = ia64_sal_pci_config_read(in1,in3,in2,&value);
166 r9 = value;
167 }
168 else
169 printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_READ\n");
170 break;
171 case SAL_PCI_CONFIG_WRITE:
172 if (current->domain == dom0) {
173 if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
174 (in4 > 1) ||
175 (in2 > 8) || (in2 & (in2-1)))
176 printk("*** SAL_PCI_CONF_WRITE?!?(adr=0x%lx,typ=0x%lx,sz=0x%lx,val=0x%lx)\n",
177 in1,in4,in2,in3);
178 // note that args are in a different order!!
179 status = ia64_sal_pci_config_write(in1,in4,in2,in3);
180 }
181 else
182 printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_WRITE\n");
183 break;
184 case SAL_SET_VECTORS:
185 if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
186 if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
187 /* Sanity check: cs_length1 must be 0,
188 second vector is reserved. */
189 status = -2;
190 }
191 else {
192 struct domain *d = current->domain;
193 d->arch.sal_data->boot_rdv_ip = in2;
194 d->arch.sal_data->boot_rdv_r1 = in3;
195 }
196 }
197 else
198 {
199 if (in1 > sizeof(sal_vectors)/sizeof(sal_vectors[0])-1)
200 BUG();
201 sal_vectors[in1].vector_type = in1;
202 sal_vectors[in1].handler_addr1 = in2;
203 sal_vectors[in1].gp1 = in3;
204 sal_vectors[in1].handler_len1 = in4;
205 sal_vectors[in1].handler_addr2 = in5;
206 sal_vectors[in1].gp2 = in6;
207 sal_vectors[in1].handler_len2 = in7;
208 }
209 break;
210 case SAL_GET_STATE_INFO:
211 if (current->domain == dom0) {
212 sal_queue_entry_t *e;
213 unsigned long flags;
214 struct smp_call_args_t arg;
216 spin_lock_irqsave(&sal_queue_lock, flags);
217 if (!sal_queue || list_empty(&sal_queue[in1])) {
218 sal_log_record_header_t header;
219 XEN_GUEST_HANDLE(void) handle =
220 *(XEN_GUEST_HANDLE(void)*)&in3;
222 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) "
223 "no sal_queue entry found.\n",
224 rec_name[in1]);
225 memset(&header, 0, sizeof(header));
227 if (copy_to_guest(handle, &header, 1)) {
228 printk("sal_emulator: "
229 "SAL_GET_STATE_INFO can't copy "
230 "empty header to user: 0x%lx\n",
231 in3);
232 }
233 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
234 r9 = 0;
235 spin_unlock_irqrestore(&sal_queue_lock, flags);
236 break;
237 }
238 e = list_entry(sal_queue[in1].next,
239 sal_queue_entry_t, list);
240 spin_unlock_irqrestore(&sal_queue_lock, flags);
242 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s <= %s) "
243 "on CPU#%d.\n",
244 rec_name[e->sal_info_type],
245 rec_name[in1], e->cpuid);
247 arg.type = e->sal_info_type;
248 arg.target = in3;
249 arg.corrected = !!((in1 != e->sal_info_type) &&
250 (e->sal_info_type == SAL_INFO_TYPE_MCA));
251 arg.domain = current->domain;
252 arg.status = 0;
254 if (e->cpuid == smp_processor_id()) {
255 IA64_SAL_DEBUG("SAL_GET_STATE_INFO: local\n");
256 get_state_info_on(&arg);
257 } else {
258 int ret;
259 IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
260 ret = smp_call_function_single(e->cpuid,
261 get_state_info_on,
262 &arg, 0, 1);
263 if (ret < 0) {
264 printk("SAL_GET_STATE_INFO "
265 "smp_call_function_single error:"
266 " %d\n", ret);
267 arg.ret = 0;
268 arg.status =
269 IA64_SAL_NO_INFORMATION_AVAILABLE;
270 }
271 }
272 r9 = arg.ret;
273 status = arg.status;
274 if (r9 == 0) {
275 spin_lock_irqsave(&sal_queue_lock, flags);
276 list_del(&e->list);
277 spin_unlock_irqrestore(&sal_queue_lock, flags);
278 xfree(e);
279 }
280 } else {
281 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
282 r9 = 0;
283 }
284 break;
285 case SAL_GET_STATE_INFO_SIZE:
286 r9 = ia64_sal_get_state_info_size(in1);
287 break;
288 case SAL_CLEAR_STATE_INFO:
289 if (current->domain == dom0) {
290 sal_queue_entry_t *e;
291 unsigned long flags;
292 struct smp_call_args_t arg;
294 spin_lock_irqsave(&sal_queue_lock, flags);
295 if (list_empty(&sal_queue[in1])) {
296 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) "
297 "no sal_queue entry found.\n",
298 rec_name[in1]);
299 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
300 r9 = 0;
301 spin_unlock_irqrestore(&sal_queue_lock, flags);
302 break;
303 }
304 e = list_entry(sal_queue[in1].next,
305 sal_queue_entry_t, list);
307 list_del(&e->list);
308 spin_unlock_irqrestore(&sal_queue_lock, flags);
310 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s <= %s) "
311 "on CPU#%d.\n",
312 rec_name[e->sal_info_type],
313 rec_name[in1], e->cpuid);
316 arg.type = e->sal_info_type;
317 arg.status = 0;
318 if (e->cpuid == smp_processor_id()) {
319 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: local\n");
320 clear_state_info_on(&arg);
321 } else {
322 int ret;
323 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: remote\n");
324 ret = smp_call_function_single(e->cpuid,
325 clear_state_info_on, &arg, 0, 1);
326 if (ret < 0) {
327 printk("sal_emulator: "
328 "SAL_CLEAR_STATE_INFO "
329 "smp_call_function_single error:"
330 " %d\n", ret);
331 arg.ret = 0;
332 arg.status =
333 IA64_SAL_NO_INFORMATION_AVAILABLE;
334 }
335 }
336 r9 = arg.ret;
337 status = arg.status;
338 xfree(e);
339 }
340 break;
341 case SAL_MC_RENDEZ:
342 printk("*** CALLED SAL_MC_RENDEZ. IGNORED...\n");
343 break;
344 case SAL_MC_SET_PARAMS:
345 if (in1 > sizeof(sal_mc_params)/sizeof(sal_mc_params[0]))
346 BUG();
347 sal_mc_params[in1].param_type = in1;
348 sal_mc_params[in1].i_or_m = in2;
349 sal_mc_params[in1].i_or_m_val = in3;
350 sal_mc_params[in1].timeout = in4;
351 sal_mc_params[in1].rz_always = in5;
352 break;
353 case SAL_CACHE_FLUSH:
354 if (1) {
355 /* Flush using SAL.
356 This method is faster but has a side effect on
357 other vcpu running on this cpu. */
358 status = ia64_sal_cache_flush (in1);
359 }
360 else {
361 /* Flush with fc all the domain.
362 This method is slower but has no side effects. */
363 domain_cache_flush (current->domain, in1 == 4 ? 1 : 0);
364 status = 0;
365 }
366 break;
367 case SAL_CACHE_INIT:
368 printk("*** CALLED SAL_CACHE_INIT. IGNORED...\n");
369 break;
370 case SAL_UPDATE_PAL:
371 printk("*** CALLED SAL_UPDATE_PAL. IGNORED...\n");
372 break;
373 default:
374 printk("*** CALLED SAL_ WITH UNKNOWN INDEX. IGNORED...\n");
375 status = -1;
376 break;
377 }
378 return ((struct sal_ret_values) {status, r9, r10, r11});
379 }
381 struct ia64_pal_retval
382 xen_pal_emulator(unsigned long index, u64 in1, u64 in2, u64 in3)
383 {
384 unsigned long r9 = 0;
385 unsigned long r10 = 0;
386 unsigned long r11 = 0;
387 long status = PAL_STATUS_UNIMPLEMENTED;
389 if (running_on_sim)
390 return pal_emulator_static(index);
392 // pal code must be mapped by a TR when pal is called, however
393 // calls are rare enough that we will map it lazily rather than
394 // at every context switch
395 //efi_map_pal_code();
396 switch (index) {
397 case PAL_MEM_ATTRIB:
398 status = ia64_pal_mem_attrib(&r9);
399 break;
400 case PAL_FREQ_BASE:
401 status = ia64_pal_freq_base(&r9);
402 if (status == PAL_STATUS_UNIMPLEMENTED) {
403 status = ia64_sal_freq_base(0, &r9, &r10);
404 r10 = 0;
405 }
406 break;
407 case PAL_PROC_GET_FEATURES:
408 status = ia64_pal_proc_get_features(&r9,&r10,&r11);
409 break;
410 case PAL_BUS_GET_FEATURES:
411 status = ia64_pal_bus_get_features(
412 (pal_bus_features_u_t *) &r9,
413 (pal_bus_features_u_t *) &r10,
414 (pal_bus_features_u_t *) &r11);
415 break;
416 case PAL_FREQ_RATIOS:
417 status = ia64_pal_freq_ratios(
418 (struct pal_freq_ratio *) &r9,
419 (struct pal_freq_ratio *) &r10,
420 (struct pal_freq_ratio *) &r11);
421 break;
422 case PAL_PTCE_INFO:
423 {
424 // return hard-coded xen-specific values because ptc.e
425 // is emulated on xen to always flush everything
426 // these values result in only one ptc.e instruction
427 status = 0; r9 = 0; r10 = (1L << 32) | 1L; r11 = 0;
428 }
429 break;
430 case PAL_VERSION:
431 status = ia64_pal_version(
432 (pal_version_u_t *) &r9,
433 (pal_version_u_t *) &r10);
434 break;
435 case PAL_VM_PAGE_SIZE:
436 status = ia64_pal_vm_page_size(&r9,&r10);
437 break;
438 case PAL_DEBUG_INFO:
439 status = ia64_pal_debug_info(&r9,&r10);
440 break;
441 case PAL_CACHE_SUMMARY:
442 status = ia64_pal_cache_summary(&r9,&r10);
443 break;
444 case PAL_VM_SUMMARY:
445 if (VMX_DOMAIN(current)) {
446 pal_vm_info_1_u_t v1;
447 pal_vm_info_2_u_t v2;
448 status = ia64_pal_vm_summary((pal_vm_info_1_u_t *)&v1,
449 (pal_vm_info_2_u_t *)&v2);
450 v1.pal_vm_info_1_s.max_itr_entry = NITRS - 1;
451 v1.pal_vm_info_1_s.max_dtr_entry = NDTRS - 1;
452 v2.pal_vm_info_2_s.impl_va_msb -= 1;
453 v2.pal_vm_info_2_s.rid_size =
454 current->domain->arch.rid_bits;
455 r9 = v1.pvi1_val;
456 r10 = v2.pvi2_val;
457 } else {
458 /* Use xen-specific values.
459 hash_tag_id is somewhat random! */
460 static const pal_vm_info_1_u_t v1 =
461 {.pal_vm_info_1_s =
462 { .vw = 1,
463 .phys_add_size = 44,
464 .key_size = 16,
465 .max_pkr = 15,
466 .hash_tag_id = 0x30,
467 .max_dtr_entry = NDTRS - 1,
468 .max_itr_entry = NITRS - 1,
469 #ifdef VHPT_GLOBAL
470 .max_unique_tcs = 3,
471 .num_tc_levels = 2
472 #else
473 .max_unique_tcs = 2,
474 .num_tc_levels = 1
475 #endif
476 }};
477 pal_vm_info_2_u_t v2;
478 v2.pvi2_val = 0;
479 v2.pal_vm_info_2_s.rid_size =
480 current->domain->arch.rid_bits;
481 v2.pal_vm_info_2_s.impl_va_msb = 50;
482 r9 = v1.pvi1_val;
483 r10 = v2.pvi2_val;
484 status = PAL_STATUS_SUCCESS;
485 }
486 break;
487 case PAL_VM_INFO:
488 if (VMX_DOMAIN(current)) {
489 status = ia64_pal_vm_info(in1, in2,
490 (pal_tc_info_u_t *)&r9, &r10);
491 break;
492 }
493 #ifdef VHPT_GLOBAL
494 if (in1 == 0 && in2 == 2) {
495 /* Level 1: VHPT */
496 const pal_tc_info_u_t v =
497 { .pal_tc_info_s = {.num_sets = 128,
498 .associativity = 1,
499 .num_entries = 128,
500 .pf = 1,
501 .unified = 1,
502 .reduce_tr = 0,
503 .reserved = 0}};
504 r9 = v.pti_val;
505 /* Only support PAGE_SIZE tc. */
506 r10 = PAGE_SIZE;
507 status = PAL_STATUS_SUCCESS;
508 }
509 #endif
510 else if (
511 #ifdef VHPT_GLOBAL
512 in1 == 1 /* Level 2. */
513 #else
514 in1 == 0 /* Level 1. */
515 #endif
516 && (in2 == 1 || in2 == 2))
517 {
518 /* itlb/dtlb, 1 entry. */
519 const pal_tc_info_u_t v =
520 { .pal_tc_info_s = {.num_sets = 1,
521 .associativity = 1,
522 .num_entries = 1,
523 .pf = 1,
524 .unified = 0,
525 .reduce_tr = 0,
526 .reserved = 0}};
527 r9 = v.pti_val;
528 /* Only support PAGE_SIZE tc. */
529 r10 = PAGE_SIZE;
530 status = PAL_STATUS_SUCCESS;
531 }
532 else
533 status = PAL_STATUS_EINVAL;
534 break;
535 case PAL_RSE_INFO:
536 status = ia64_pal_rse_info(
537 &r9,
538 (pal_hints_u_t *) &r10);
539 break;
540 case PAL_REGISTER_INFO:
541 status = ia64_pal_register_info(in1, &r9, &r10);
542 break;
543 case PAL_CACHE_FLUSH:
544 /* Always call Host Pal in int=0 */
545 in2 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
547 /*
548 * Call Host PAL cache flush
549 * Clear psr.ic when call PAL_CACHE_FLUSH
550 */
551 r10 = in3;
552 status = ia64_pal_cache_flush(in1, in2, &r10, &r9);
554 if (status != 0)
555 panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
556 "status %lx", status);
558 break;
559 case PAL_PERF_MON_INFO:
560 {
561 unsigned long pm_buffer[16];
562 status = ia64_pal_perf_mon_info(
563 pm_buffer,
564 (pal_perf_mon_info_u_t *) &r9);
565 if (status != 0) {
566 while(1)
567 printk("PAL_PERF_MON_INFO fails ret=%ld\n", status);
568 break;
569 }
570 if (copy_to_user((void __user *)in1,pm_buffer,128)) {
571 while(1)
572 printk("xen_pal_emulator: PAL_PERF_MON_INFO "
573 "can't copy to user!!!!\n");
574 status = PAL_STATUS_UNIMPLEMENTED;
575 break;
576 }
577 }
578 break;
579 case PAL_CACHE_INFO:
580 {
581 pal_cache_config_info_t ci;
582 status = ia64_pal_cache_config_info(in1,in2,&ci);
583 if (status != 0) break;
584 r9 = ci.pcci_info_1.pcci1_data;
585 r10 = ci.pcci_info_2.pcci2_data;
586 }
587 break;
588 case PAL_VM_TR_READ: /* FIXME: vcpu_get_tr?? */
589 printk("PAL_VM_TR_READ NOT IMPLEMENTED, IGNORED!\n");
590 break;
591 case PAL_HALT_INFO:
592 {
593 /* 1000 cycles to enter/leave low power state,
594 consumes 10 mW, implemented and cache/TLB coherent. */
595 unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
596 | (1UL << 61) | (1UL << 60);
597 if (copy_to_user ((void *)in1, &res, sizeof (res)))
598 status = PAL_STATUS_EINVAL;
599 else
600 status = PAL_STATUS_SUCCESS;
601 }
602 break;
603 case PAL_HALT:
604 if (current->domain == dom0) {
605 printk ("Domain0 halts the machine\n");
606 console_start_sync();
607 (*efi.reset_system)(EFI_RESET_SHUTDOWN,0,0,NULL);
608 }
609 else
610 domain_shutdown(current->domain, SHUTDOWN_poweroff);
611 break;
612 case PAL_HALT_LIGHT:
613 if (VMX_DOMAIN(current)) {
614 /* Called by VTI. */
615 if (!is_unmasked_irq(current)) {
616 do_sched_op_compat(SCHEDOP_block, 0);
617 do_softirq();
618 }
619 status = PAL_STATUS_SUCCESS;
620 }
621 break;
622 case PAL_PLATFORM_ADDR:
623 if (VMX_DOMAIN(current))
624 status = PAL_STATUS_SUCCESS;
625 break;
626 default:
627 printk("xen_pal_emulator: UNIMPLEMENTED PAL CALL %lu!!!!\n",
628 index);
629 break;
630 }
631 return ((struct ia64_pal_retval) {status, r9, r10, r11});
632 }
// given a current domain (virtual or metaphysical) address, return the virtual address
static unsigned long
efi_translate_domain_addr(unsigned long domain_addr, IA64FAULT *fault,
			  struct page_info** page)
{
	struct vcpu *v = current;
	unsigned long mpaddr = domain_addr;
	unsigned long virt;
	*fault = IA64_NO_FAULT;

 again:
	/* In EFI virtual mode the guest passes a virtual address; run it
	 * through the vcpu's TLB to get the metaphysical address first. */
 	if (v->domain->arch.sal_data->efi_virt_mode) {
		*fault = vcpu_tpa(v, domain_addr, &mpaddr);
		if (*fault != IA64_NO_FAULT) return 0;
	}

	virt = (unsigned long)domain_mpa_to_imva(v->domain, mpaddr);
	/* Pin the backing page so it cannot go away while the caller uses
	 * the translation; caller must put_page(*page) when done. */
	*page = virt_to_page(virt);
	if (get_page(*page, current->domain) == 0) {
		if (page_get_owner(*page) != current->domain) {
			// which code is appropriate?
			*fault = IA64_FAULT;
			return 0;
		}
		/* get_page() failed transiently (e.g. racing refcount
		 * update) but the page is still ours: retranslate & retry. */
		goto again;
	}

	return virt;
}
664 static efi_status_t
665 efi_emulate_get_time(
666 unsigned long tv_addr, unsigned long tc_addr,
667 IA64FAULT *fault)
668 {
669 unsigned long tv, tc = 0;
670 struct page_info *tv_page = NULL;
671 struct page_info *tc_page = NULL;
672 efi_status_t status = 0;
674 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
675 if (*fault != IA64_NO_FAULT)
676 goto errout;
677 if (tc_addr) {
678 tc = efi_translate_domain_addr(tc_addr, fault, &tc_page);
679 if (*fault != IA64_NO_FAULT)
680 goto errout;
681 }
683 spin_lock(&efi_time_services_lock);
684 status = (*efi.get_time)((efi_time_t *) tv, (efi_time_cap_t *) tc);
685 spin_unlock(&efi_time_services_lock);
687 errout:
688 if (tc_page != NULL)
689 put_page(tc_page);
690 if (tv_page != NULL)
691 put_page(tv_page);
693 return status;
694 }
696 static efi_status_t
697 efi_emulate_set_time(
698 unsigned long tv_addr, IA64FAULT *fault)
699 {
700 unsigned long tv;
701 struct page_info *tv_page = NULL;
702 efi_status_t status = 0;
704 if (current->domain != dom0)
705 return EFI_UNSUPPORTED;
707 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
708 if (*fault != IA64_NO_FAULT)
709 goto errout;
711 spin_lock(&efi_time_services_lock);
712 status = (*efi.set_time)((efi_time_t *)tv);
713 spin_unlock(&efi_time_services_lock);
715 errout:
716 if (tv_page != NULL)
717 put_page(tv_page);
719 return status;
720 }
722 static efi_status_t
723 efi_emulate_get_wakeup_time(
724 unsigned long e_addr, unsigned long p_addr,
725 unsigned long tv_addr, IA64FAULT *fault)
726 {
727 unsigned long enabled, pending, tv;
728 struct page_info *e_page = NULL, *p_page = NULL,
729 *tv_page = NULL;
730 efi_status_t status = 0;
732 if (current->domain != dom0)
733 return EFI_UNSUPPORTED;
735 if (!e_addr || !p_addr || !tv_addr)
736 return EFI_INVALID_PARAMETER;
738 enabled = efi_translate_domain_addr(e_addr, fault, &e_page);
739 if (*fault != IA64_NO_FAULT)
740 goto errout;
741 pending = efi_translate_domain_addr(p_addr, fault, &p_page);
742 if (*fault != IA64_NO_FAULT)
743 goto errout;
744 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
745 if (*fault != IA64_NO_FAULT)
746 goto errout;
748 spin_lock(&efi_time_services_lock);
749 status = (*efi.get_wakeup_time)((efi_bool_t *)enabled,
750 (efi_bool_t *)pending,
751 (efi_time_t *)tv);
752 spin_unlock(&efi_time_services_lock);
754 errout:
755 if (e_page != NULL)
756 put_page(e_page);
757 if (p_page != NULL)
758 put_page(p_page);
759 if (tv_page != NULL)
760 put_page(tv_page);
762 return status;
763 }
765 static efi_status_t
766 efi_emulate_set_wakeup_time(
767 unsigned long enabled, unsigned long tv_addr,
768 IA64FAULT *fault)
769 {
770 unsigned long tv = 0;
771 struct page_info *tv_page = NULL;
772 efi_status_t status = 0;
774 if (current->domain != dom0)
775 return EFI_UNSUPPORTED;
777 if (tv_addr) {
778 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
779 if (*fault != IA64_NO_FAULT)
780 goto errout;
781 }
783 spin_lock(&efi_time_services_lock);
784 status = (*efi.set_wakeup_time)((efi_bool_t)enabled,
785 (efi_time_t *)tv);
786 spin_unlock(&efi_time_services_lock);
788 errout:
789 if (tv_page != NULL)
790 put_page(tv_page);
792 return status;
793 }
795 static efi_status_t
796 efi_emulate_get_variable(
797 unsigned long name_addr, unsigned long vendor_addr,
798 unsigned long attr_addr, unsigned long data_size_addr,
799 unsigned long data_addr, IA64FAULT *fault)
800 {
801 unsigned long name, vendor, attr = 0, data_size, data;
802 struct page_info *name_page = NULL, *vendor_page = NULL,
803 *attr_page = NULL, *data_size_page = NULL,
804 *data_page = NULL;
805 efi_status_t status = 0;
807 if (current->domain != dom0)
808 return EFI_UNSUPPORTED;
810 name = efi_translate_domain_addr(name_addr, fault, &name_page);
811 if (*fault != IA64_NO_FAULT)
812 goto errout;
813 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
814 if (*fault != IA64_NO_FAULT)
815 goto errout;
816 data_size = efi_translate_domain_addr(data_size_addr, fault,
817 &data_size_page);
818 if (*fault != IA64_NO_FAULT)
819 goto errout;
820 data = efi_translate_domain_addr(data_addr, fault, &data_page);
821 if (*fault != IA64_NO_FAULT)
822 goto errout;
823 if (attr_addr) {
824 attr = efi_translate_domain_addr(attr_addr, fault, &attr_page);
825 if (*fault != IA64_NO_FAULT)
826 goto errout;
827 }
829 status = (*efi.get_variable)((efi_char16_t *)name,
830 (efi_guid_t *)vendor,
831 (u32 *)attr,
832 (unsigned long *)data_size,
833 (void *)data);
835 errout:
836 if (name_page != NULL)
837 put_page(name_page);
838 if (vendor_page != NULL)
839 put_page(vendor_page);
840 if (attr_page != NULL)
841 put_page(attr_page);
842 if (data_size_page != NULL)
843 put_page(data_size_page);
844 if (data_page != NULL)
845 put_page(data_page);
847 return status;
848 }
850 static efi_status_t
851 efi_emulate_get_next_variable(
852 unsigned long name_size_addr, unsigned long name_addr,
853 unsigned long vendor_addr, IA64FAULT *fault)
854 {
855 unsigned long name_size, name, vendor;
856 struct page_info *name_size_page = NULL, *name_page = NULL,
857 *vendor_page = NULL;
858 efi_status_t status = 0;
860 if (current->domain != dom0)
861 return EFI_UNSUPPORTED;
863 name_size = efi_translate_domain_addr(name_size_addr, fault,
864 &name_size_page);
865 if (*fault != IA64_NO_FAULT)
866 goto errout;
867 name = efi_translate_domain_addr(name_addr, fault, &name_page);
868 if (*fault != IA64_NO_FAULT)
869 goto errout;
870 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
871 if (*fault != IA64_NO_FAULT)
872 goto errout;
874 status = (*efi.get_next_variable)((unsigned long *)name_size,
875 (efi_char16_t *)name,
876 (efi_guid_t *)vendor);
878 errout:
879 if (name_size_page != NULL)
880 put_page(name_size_page);
881 if (name_page != NULL)
882 put_page(name_page);
883 if (vendor_page != NULL)
884 put_page(vendor_page);
886 return status;
887 }
889 static efi_status_t
890 efi_emulate_set_variable(
891 unsigned long name_addr, unsigned long vendor_addr,
892 unsigned long attr, unsigned long data_size,
893 unsigned long data_addr, IA64FAULT *fault)
894 {
895 unsigned long name, vendor, data;
896 struct page_info *name_page = NULL, *vendor_page = NULL,
897 *data_page = NULL;
898 efi_status_t status = 0;
900 if (current->domain != dom0)
901 return EFI_UNSUPPORTED;
903 name = efi_translate_domain_addr(name_addr, fault, &name_page);
904 if (*fault != IA64_NO_FAULT)
905 goto errout;
906 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
907 if (*fault != IA64_NO_FAULT)
908 goto errout;
909 data = efi_translate_domain_addr(data_addr, fault, &data_page);
910 if (*fault != IA64_NO_FAULT)
911 goto errout;
913 status = (*efi.set_variable)((efi_char16_t *)name,
914 (efi_guid_t *)vendor,
915 attr,
916 data_size,
917 (void *)data);
919 errout:
920 if (name_page != NULL)
921 put_page(name_page);
922 if (vendor_page != NULL)
923 put_page(vendor_page);
924 if (data_page != NULL)
925 put_page(data_page);
927 return status;
928 }
/*
 * Emulate EFI SetVirtualAddressMap() for a guest.  Walks the guest's
 * memory map looking for the (single) EFI_PAL_CODE descriptor and
 * rewrites the domain's EFI runtime-service and FPSWA entry stubs so
 * they point at hypercall trampolines inside the descriptor's new
 * virtual mapping.  May only be called once per domain.
 */
static efi_status_t
efi_emulate_set_virtual_address_map(
	unsigned long memory_map_size, unsigned long descriptor_size,
	u32 descriptor_version, efi_memory_desc_t *virtual_map)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t entry, *md = &entry;
	u64 efi_desc_size;

	unsigned long *vfn;
	struct domain *d = current->domain;
	efi_runtime_services_t *efi_runtime = d->arch.efi_runtime;
	fpswa_interface_t *fpswa_inf = d->arch.fpswa_inf;

	if (descriptor_version != EFI_MEMDESC_VERSION) {
		printk ("efi_emulate_set_virtual_address_map: memory "
		        "descriptor version unmatched (%d vs %d)\n",
		        (int)descriptor_version, EFI_MEMDESC_VERSION);
		return EFI_INVALID_PARAMETER;
	}

	if (descriptor_size != sizeof(efi_memory_desc_t)) {
		printk ("efi_emulate_set_virtual_address_map: memory descriptor size unmatched\n");
		return EFI_INVALID_PARAMETER;
	}

	/* The map may be applied only once. */
	if (d->arch.sal_data->efi_virt_mode)
		return EFI_UNSUPPORTED;

	efi_map_start = virtual_map;
	efi_map_end   = efi_map_start + memory_map_size;
	efi_desc_size = sizeof(efi_memory_desc_t);

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		/* Copy each descriptor out of guest memory before use. */
		if (copy_from_user(&entry, p, sizeof(efi_memory_desc_t))) {
			printk ("efi_emulate_set_virtual_address_map: copy_from_user() fault. addr=0x%p\n", p);
			return EFI_UNSUPPORTED;
		}

		/* skip over non-PAL_CODE memory descriptors; EFI_RUNTIME is included in PAL_CODE. */
		if (md->type != EFI_PAL_CODE)
			continue;

		/* Patch one runtime-service entry point (a function
		 * descriptor: address word + gp word) to point at the
		 * hypercall stub at a fixed 16-byte slot inside the
		 * descriptor's new virtual mapping. */
#define EFI_HYPERCALL_PATCH_TO_VIRT(tgt, call) \
	do { \
		vfn = (unsigned long *) domain_mpa_to_imva(d, tgt); \
		*vfn++ = FW_HYPERCALL_##call##_INDEX * 16UL + md->virt_addr; \
		*vfn++ = 0; \
	} while (0)

		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_time,EFI_GET_TIME);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_time,EFI_SET_TIME);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_wakeup_time,EFI_GET_WAKEUP_TIME);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_wakeup_time,EFI_SET_WAKEUP_TIME);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_virtual_address_map,EFI_SET_VIRTUAL_ADDRESS_MAP);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_variable,EFI_GET_VARIABLE);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_variable,EFI_GET_NEXT_VARIABLE);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_variable,EFI_SET_VARIABLE);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_high_mono_count,EFI_GET_NEXT_HIGH_MONO_COUNT);
		EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->reset_system,EFI_RESET_SYSTEM);

		/* Likewise patch the FPSWA function descriptor and entry. */
		vfn = (unsigned long *) domain_mpa_to_imva(d, (unsigned long) fpswa_inf->fpswa);
		*vfn++ = FW_HYPERCALL_FPSWA_PATCH_INDEX * 16UL + md->virt_addr;
		*vfn   = 0;
		fpswa_inf->fpswa = (void *) (FW_HYPERCALL_FPSWA_ENTRY_INDEX * 16UL + md->virt_addr);
		break;
	}

	/* The virtual address map has been applied. */
	d->arch.sal_data->efi_virt_mode = 1;

	return EFI_SUCCESS;
}
/*
 * Dispatch an ia64 firmware hypercall: regs->r2 selects the EFI runtime
 * service and the guest's argument registers (gr32...) are forwarded to
 * the matching efi_emulate_* helper.  *fault reports address-translation
 * faults back to the hypercall entry path.
 */
efi_status_t
efi_emulator (struct pt_regs *regs, IA64FAULT *fault)
{
	struct vcpu *v = current;
	efi_status_t status;

	*fault = IA64_NO_FAULT;

	switch (regs->r2) {
	    case FW_HYPERCALL_EFI_RESET_SYSTEM:
		{
		    /* Map the EFI reset type onto a Xen shutdown reason;
		       anything that is not a shutdown becomes a reboot. */
		    u8 reason;
		    unsigned long val = vcpu_get_gr(v,32);
		    switch (val)
		    {
			case EFI_RESET_SHUTDOWN:
				reason = SHUTDOWN_poweroff;
				break;
			case EFI_RESET_COLD:
			case EFI_RESET_WARM:
			default:
				reason = SHUTDOWN_reboot;
				break;
		    }
		    domain_shutdown (current->domain, reason);
		}
		status = EFI_UNSUPPORTED;
		break;
	    case FW_HYPERCALL_EFI_GET_TIME:
		status = efi_emulate_get_time (
				vcpu_get_gr(v,32),
				vcpu_get_gr(v,33),
				fault);
		break;
	    case FW_HYPERCALL_EFI_SET_TIME:
		status = efi_emulate_set_time (
				vcpu_get_gr(v,32),
				fault);
		break;
	    case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
		status = efi_emulate_get_wakeup_time (
				vcpu_get_gr(v,32),
				vcpu_get_gr(v,33),
				vcpu_get_gr(v,34),
				fault);
		break;
	    case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
		status = efi_emulate_set_wakeup_time (
				vcpu_get_gr(v,32),
				vcpu_get_gr(v,33),
				fault);
		break;
	    case FW_HYPERCALL_EFI_GET_VARIABLE:
		status = efi_emulate_get_variable (
				vcpu_get_gr(v,32),
				vcpu_get_gr(v,33),
				vcpu_get_gr(v,34),
				vcpu_get_gr(v,35),
				vcpu_get_gr(v,36),
				fault);
		break;
	    case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
		status = efi_emulate_get_next_variable (
				vcpu_get_gr(v,32),
				vcpu_get_gr(v,33),
				vcpu_get_gr(v,34),
				fault);
		break;
	    case FW_HYPERCALL_EFI_SET_VARIABLE:
		status = efi_emulate_set_variable (
				vcpu_get_gr(v,32),
				vcpu_get_gr(v,33),
				vcpu_get_gr(v,34),
				vcpu_get_gr(v,35),
				vcpu_get_gr(v,36),
				fault);
		break;
	    case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
		status = efi_emulate_set_virtual_address_map (
				vcpu_get_gr(v,32),
				vcpu_get_gr(v,33),
				(u32) vcpu_get_gr(v,34),
				(efi_memory_desc_t *) vcpu_get_gr(v,35));
		break;
	    case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
		// FIXME: need fixes in efi.h from 2.6.9
		status = EFI_UNSUPPORTED;
		break;
	    default:
		printk("unknown ia64 fw hypercall %lx\n", regs->r2);
		status = EFI_UNSUPPORTED;
		break;
	}

	return status;
}
1100 void
1101 do_ssc(unsigned long ssc, struct pt_regs *regs)
1103 unsigned long arg0, arg1, arg2, arg3, retval;
1104 char buf[2];
1105 /**/ static int last_fd, last_count; // FIXME FIXME FIXME
1106 /**/ // BROKEN FOR MULTIPLE DOMAINS & SMP
1107 /**/ struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
1109 arg0 = vcpu_get_gr(current,32);
1110 switch(ssc) {
1111 case SSC_PUTCHAR:
1112 buf[0] = arg0;
1113 buf[1] = '\0';
1114 printk(buf);
1115 break;
1116 case SSC_GETCHAR:
1117 retval = ia64_ssc(0,0,0,0,ssc);
1118 vcpu_set_gr(current,8,retval,0);
1119 break;
1120 case SSC_WAIT_COMPLETION:
1121 if (arg0) { // metaphysical address
1123 arg0 = translate_domain_mpaddr(arg0, NULL);
1124 /**/ stat = (struct ssc_disk_stat *)__va(arg0);
1125 ///**/ if (stat->fd == last_fd) stat->count = last_count;
1126 /**/ stat->count = last_count;
1127 //if (last_count >= PAGE_SIZE) printk("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
1128 ///**/ retval = ia64_ssc(arg0,0,0,0,ssc);
1129 /**/ retval = 0;
1131 else retval = -1L;
1132 vcpu_set_gr(current,8,retval,0);
1133 break;
1134 case SSC_OPEN:
1135 arg1 = vcpu_get_gr(current,33); // access rights
1136 if (!running_on_sim) { printk("SSC_OPEN, not implemented on hardware. (ignoring...)\n"); arg0 = 0; }
1137 if (arg0) { // metaphysical address
1138 arg0 = translate_domain_mpaddr(arg0, NULL);
1139 retval = ia64_ssc(arg0,arg1,0,0,ssc);
1141 else retval = -1L;
1142 vcpu_set_gr(current,8,retval,0);
1143 break;
1144 case SSC_WRITE:
1145 case SSC_READ:
1146 //if (ssc == SSC_WRITE) printk("DOING AN SSC_WRITE\n");
1147 arg1 = vcpu_get_gr(current,33);
1148 arg2 = vcpu_get_gr(current,34);
1149 arg3 = vcpu_get_gr(current,35);
1150 if (arg2) { // metaphysical address of descriptor
1151 struct ssc_disk_req *req;
1152 unsigned long mpaddr;
1153 long len;
1155 arg2 = translate_domain_mpaddr(arg2, NULL);
1156 req = (struct ssc_disk_req *) __va(arg2);
1157 req->len &= 0xffffffffL; // avoid strange bug
1158 len = req->len;
1159 /**/ last_fd = arg1;
1160 /**/ last_count = len;
1161 mpaddr = req->addr;
1162 //if (last_count >= PAGE_SIZE) printk("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
1163 retval = 0;
1164 if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
1165 // do partial page first
1166 req->addr = translate_domain_mpaddr(mpaddr, NULL);
1167 req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
1168 len -= req->len; mpaddr += req->len;
1169 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1170 arg3 += req->len; // file offset
1171 /**/ last_stat.fd = last_fd;
1172 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
1173 //if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
1175 if (retval >= 0) while (len > 0) {
1176 req->addr = translate_domain_mpaddr(mpaddr, NULL);
1177 req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
1178 len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
1179 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1180 arg3 += req->len; // file offset
1181 // TEMP REMOVED AGAIN arg3 += req->len; // file offset
1182 /**/ last_stat.fd = last_fd;
1183 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
1184 //if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
1186 // set it back to the original value
1187 req->len = last_count;
1189 else retval = -1L;
1190 vcpu_set_gr(current,8,retval,0);
1191 //if (last_count >= PAGE_SIZE) printk("retval=%x\n",retval);
1192 break;
1193 case SSC_CONNECT_INTERRUPT:
1194 arg1 = vcpu_get_gr(current,33);
1195 arg2 = vcpu_get_gr(current,34);
1196 arg3 = vcpu_get_gr(current,35);
1197 if (!running_on_sim) { printk("SSC_CONNECT_INTERRUPT, not implemented on hardware. (ignoring...)\n"); break; }
1198 (void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1199 break;
1200 case SSC_NETDEV_PROBE:
1201 vcpu_set_gr(current,8,-1L,0);
1202 break;
1203 default:
1204 panic_domain(regs,
1205 "%s: bad ssc code %lx, iip=0x%lx, b0=0x%lx\n",
1206 __func__, ssc, regs->cr_iip, regs->b0);
1207 break;
1209 vcpu_increment_iip(current);