ia64/xen-unstable

view xen/arch/ia64/xen/fw_emul.c @ 13902:5982d478698f

[IA64] Add localtime setting for PV/IA64 domain

Signed-off-by: Atsushi SAKAI <sakaia@jp.fujitsu.com>
author awilliam@xenbuild2.aw
date Mon Feb 12 10:06:46 2007 -0700 (2007-02-12)
parents 5b99d19906a7
children 2b3dd681dbce
line source
1 /*
2 * fw_emul.c:
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 */
18 #include <xen/config.h>
19 #include <asm/system.h>
20 #include <asm/pgalloc.h>
22 #include <linux/efi.h>
23 #include <asm/pal.h>
24 #include <asm/sal.h>
25 #include <asm/xenmca.h>
27 #include <public/sched.h>
28 #include "hpsim_ssc.h"
29 #include <asm/vcpu.h>
30 #include <asm/vmx_vcpu.h>
31 #include <asm/dom_fw.h>
32 #include <asm/uaccess.h>
33 #include <xen/console.h>
34 #include <xen/hypercall.h>
35 #include <xen/softirq.h>
36 #include <xen/time.h>
static DEFINE_SPINLOCK(efi_time_services_lock);

/* Nonzero when running on the HP Ski simulator rather than real hardware. */
extern unsigned long running_on_sim;

/* Machine-check parameters registered by the guest via SAL_MC_SET_PARAMS;
   one slot per parameter type, indexed by param_type. */
struct sal_mc_params {
	u64 param_type;
	u64 i_or_m;
	u64 i_or_m_val;
	u64 timeout;
	u64 rz_always;
} sal_mc_params[SAL_MC_PARAM_CPE_INT + 1];

/* OS entry points registered via SAL_SET_VECTORS; indexed by vector_type.
   (The boot-rendezvous vector is handled separately, per-domain.) */
struct sal_vectors {
	u64 vector_type;
	u64 handler_addr1;
	u64 gp1;
	u64 handler_len1;
	u64 handler_addr2;
	u64 gp2;
	u64 handler_len2;
} sal_vectors[SAL_VECTOR_OS_BOOT_RENDEZ + 1];

/* Argument block handed to the get/clear_state_info_on() SMP callbacks. */
struct smp_call_args_t {
	u64 type;              /* SAL_INFO_TYPE_* record type */
	u64 ret;               /* SAL call return value (record size on success) */
	u64 target;            /* guest address to copy the record to */
	struct domain *domain; /* domain on whose behalf the call is made */
	int corrected;         /* force severity to "corrected" on the record */
	int status;            /* emulator status reported back to the guest */
	void *data;
};

/* Scratch buffer for SAL error records, guarded by sal_record_lock. */
extern sal_log_record_header_t *sal_record;
DEFINE_SPINLOCK(sal_record_lock);

extern spinlock_t sal_queue_lock;

#define IA64_SAL_NO_INFORMATION_AVAILABLE	-5

#if defined(IA64_SAL_DEBUG_INFO)
static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };

# define IA64_SAL_DEBUG(fmt...)	printk("sal_emulator: " fmt)
#else
# define IA64_SAL_DEBUG(fmt...)
#endif
85 void get_state_info_on(void *data) {
86 struct smp_call_args_t *arg = data;
87 int flags;
89 spin_lock_irqsave(&sal_record_lock, flags);
90 memset(sal_record, 0, ia64_sal_get_state_info_size(arg->type));
91 arg->ret = ia64_sal_get_state_info(arg->type, (u64 *)sal_record);
92 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) on CPU#%d returns %ld.\n",
93 rec_name[arg->type], smp_processor_id(), arg->ret);
94 if (arg->corrected) {
95 sal_record->severity = sal_log_severity_corrected;
96 IA64_SAL_DEBUG("%s: IA64_SAL_CLEAR_STATE_INFO(SAL_INFO_TYPE_MCA)"
97 " force\n", __FUNCTION__);
98 }
99 if (arg->ret > 0) {
100 /*
101 * Save current->domain and set to local(caller) domain for
102 * xencomm_paddr_to_maddr() which calculates maddr from
103 * paddr using mpa value of current->domain.
104 */
105 struct domain *save;
106 save = current->domain;
107 current->domain = arg->domain;
108 if (xencomm_copy_to_guest((void*)arg->target,
109 sal_record, arg->ret, 0)) {
110 printk("SAL_GET_STATE_INFO can't copy to user!!!!\n");
111 arg->status = IA64_SAL_NO_INFORMATION_AVAILABLE;
112 arg->ret = 0;
113 }
114 /* Restore current->domain to saved value. */
115 current->domain = save;
116 }
117 spin_unlock_irqrestore(&sal_record_lock, flags);
118 }
120 void clear_state_info_on(void *data) {
121 struct smp_call_args_t *arg = data;
123 arg->ret = ia64_sal_clear_state_info(arg->type);
124 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) on CPU#%d returns %ld.\n",
125 rec_name[arg->type], smp_processor_id(), arg->ret);
127 }
129 struct sal_ret_values
130 sal_emulator (long index, unsigned long in1, unsigned long in2,
131 unsigned long in3, unsigned long in4, unsigned long in5,
132 unsigned long in6, unsigned long in7)
133 {
134 unsigned long r9 = 0;
135 unsigned long r10 = 0;
136 long r11 = 0;
137 long status;
139 status = 0;
140 switch (index) {
141 case SAL_FREQ_BASE:
142 if (!running_on_sim)
143 status = ia64_sal_freq_base(in1,&r9,&r10);
144 else switch (in1) {
145 case SAL_FREQ_BASE_PLATFORM:
146 r9 = 200000000;
147 break;
149 case SAL_FREQ_BASE_INTERVAL_TIMER:
150 r9 = 700000000;
151 break;
153 case SAL_FREQ_BASE_REALTIME_CLOCK:
154 r9 = 1;
155 break;
157 default:
158 status = -1;
159 break;
160 }
161 break;
162 case SAL_PCI_CONFIG_READ:
163 if (current->domain == dom0) {
164 u64 value;
165 // note that args 2&3 are swapped!!
166 status = ia64_sal_pci_config_read(in1,in3,in2,&value);
167 r9 = value;
168 }
169 else
170 printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_READ\n");
171 break;
172 case SAL_PCI_CONFIG_WRITE:
173 if (current->domain == dom0) {
174 if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
175 (in4 > 1) ||
176 (in2 > 8) || (in2 & (in2-1)))
177 printk("*** SAL_PCI_CONF_WRITE?!?(adr=0x%lx,typ=0x%lx,sz=0x%lx,val=0x%lx)\n",
178 in1,in4,in2,in3);
179 // note that args are in a different order!!
180 status = ia64_sal_pci_config_write(in1,in4,in2,in3);
181 }
182 else
183 printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_WRITE\n");
184 break;
185 case SAL_SET_VECTORS:
186 if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
187 if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
188 /* Sanity check: cs_length1 must be 0,
189 second vector is reserved. */
190 status = -2;
191 }
192 else {
193 struct domain *d = current->domain;
194 d->arch.sal_data->boot_rdv_ip = in2;
195 d->arch.sal_data->boot_rdv_r1 = in3;
196 }
197 }
198 else
199 {
200 if (in1 > sizeof(sal_vectors)/sizeof(sal_vectors[0])-1)
201 BUG();
202 sal_vectors[in1].vector_type = in1;
203 sal_vectors[in1].handler_addr1 = in2;
204 sal_vectors[in1].gp1 = in3;
205 sal_vectors[in1].handler_len1 = in4;
206 sal_vectors[in1].handler_addr2 = in5;
207 sal_vectors[in1].gp2 = in6;
208 sal_vectors[in1].handler_len2 = in7;
209 }
210 break;
211 case SAL_GET_STATE_INFO:
212 if (current->domain == dom0) {
213 sal_queue_entry_t *e;
214 unsigned long flags;
215 struct smp_call_args_t arg;
217 spin_lock_irqsave(&sal_queue_lock, flags);
218 if (!sal_queue || list_empty(&sal_queue[in1])) {
219 sal_log_record_header_t header;
220 XEN_GUEST_HANDLE(void) handle =
221 *(XEN_GUEST_HANDLE(void)*)&in3;
223 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) "
224 "no sal_queue entry found.\n",
225 rec_name[in1]);
226 memset(&header, 0, sizeof(header));
228 if (copy_to_guest(handle, &header, 1)) {
229 printk("sal_emulator: "
230 "SAL_GET_STATE_INFO can't copy "
231 "empty header to user: 0x%lx\n",
232 in3);
233 }
234 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
235 r9 = 0;
236 spin_unlock_irqrestore(&sal_queue_lock, flags);
237 break;
238 }
239 e = list_entry(sal_queue[in1].next,
240 sal_queue_entry_t, list);
241 spin_unlock_irqrestore(&sal_queue_lock, flags);
243 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s <= %s) "
244 "on CPU#%d.\n",
245 rec_name[e->sal_info_type],
246 rec_name[in1], e->cpuid);
248 arg.type = e->sal_info_type;
249 arg.target = in3;
250 arg.corrected = !!((in1 != e->sal_info_type) &&
251 (e->sal_info_type == SAL_INFO_TYPE_MCA));
252 arg.domain = current->domain;
253 arg.status = 0;
255 if (e->cpuid == smp_processor_id()) {
256 IA64_SAL_DEBUG("SAL_GET_STATE_INFO: local\n");
257 get_state_info_on(&arg);
258 } else {
259 int ret;
260 IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
261 ret = smp_call_function_single(e->cpuid,
262 get_state_info_on,
263 &arg, 0, 1);
264 if (ret < 0) {
265 printk("SAL_GET_STATE_INFO "
266 "smp_call_function_single error:"
267 " %d\n", ret);
268 arg.ret = 0;
269 arg.status =
270 IA64_SAL_NO_INFORMATION_AVAILABLE;
271 }
272 }
273 r9 = arg.ret;
274 status = arg.status;
275 if (r9 == 0) {
276 spin_lock_irqsave(&sal_queue_lock, flags);
277 list_del(&e->list);
278 spin_unlock_irqrestore(&sal_queue_lock, flags);
279 xfree(e);
280 }
281 } else {
282 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
283 r9 = 0;
284 }
285 break;
286 case SAL_GET_STATE_INFO_SIZE:
287 r9 = ia64_sal_get_state_info_size(in1);
288 break;
289 case SAL_CLEAR_STATE_INFO:
290 if (current->domain == dom0) {
291 sal_queue_entry_t *e;
292 unsigned long flags;
293 struct smp_call_args_t arg;
295 spin_lock_irqsave(&sal_queue_lock, flags);
296 if (list_empty(&sal_queue[in1])) {
297 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) "
298 "no sal_queue entry found.\n",
299 rec_name[in1]);
300 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
301 r9 = 0;
302 spin_unlock_irqrestore(&sal_queue_lock, flags);
303 break;
304 }
305 e = list_entry(sal_queue[in1].next,
306 sal_queue_entry_t, list);
308 list_del(&e->list);
309 spin_unlock_irqrestore(&sal_queue_lock, flags);
311 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s <= %s) "
312 "on CPU#%d.\n",
313 rec_name[e->sal_info_type],
314 rec_name[in1], e->cpuid);
317 arg.type = e->sal_info_type;
318 arg.status = 0;
319 if (e->cpuid == smp_processor_id()) {
320 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: local\n");
321 clear_state_info_on(&arg);
322 } else {
323 int ret;
324 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: remote\n");
325 ret = smp_call_function_single(e->cpuid,
326 clear_state_info_on, &arg, 0, 1);
327 if (ret < 0) {
328 printk("sal_emulator: "
329 "SAL_CLEAR_STATE_INFO "
330 "smp_call_function_single error:"
331 " %d\n", ret);
332 arg.ret = 0;
333 arg.status =
334 IA64_SAL_NO_INFORMATION_AVAILABLE;
335 }
336 }
337 r9 = arg.ret;
338 status = arg.status;
339 xfree(e);
340 }
341 break;
342 case SAL_MC_RENDEZ:
343 printk("*** CALLED SAL_MC_RENDEZ. IGNORED...\n");
344 break;
345 case SAL_MC_SET_PARAMS:
346 if (in1 > sizeof(sal_mc_params)/sizeof(sal_mc_params[0]))
347 BUG();
348 sal_mc_params[in1].param_type = in1;
349 sal_mc_params[in1].i_or_m = in2;
350 sal_mc_params[in1].i_or_m_val = in3;
351 sal_mc_params[in1].timeout = in4;
352 sal_mc_params[in1].rz_always = in5;
353 break;
354 case SAL_CACHE_FLUSH:
355 if (1) {
356 /* Flush using SAL.
357 This method is faster but has a side effect on
358 other vcpu running on this cpu. */
359 status = ia64_sal_cache_flush (in1);
360 }
361 else {
362 /* Flush with fc all the domain.
363 This method is slower but has no side effects. */
364 domain_cache_flush (current->domain, in1 == 4 ? 1 : 0);
365 status = 0;
366 }
367 break;
368 case SAL_CACHE_INIT:
369 printk("*** CALLED SAL_CACHE_INIT. IGNORED...\n");
370 break;
371 case SAL_UPDATE_PAL:
372 printk("*** CALLED SAL_UPDATE_PAL. IGNORED...\n");
373 break;
374 default:
375 printk("*** CALLED SAL_ WITH UNKNOWN INDEX. IGNORED...\n");
376 status = -1;
377 break;
378 }
379 return ((struct sal_ret_values) {status, r9, r10, r11});
380 }
382 struct ia64_pal_retval
383 xen_pal_emulator(unsigned long index, u64 in1, u64 in2, u64 in3)
384 {
385 unsigned long r9 = 0;
386 unsigned long r10 = 0;
387 unsigned long r11 = 0;
388 long status = PAL_STATUS_UNIMPLEMENTED;
390 if (running_on_sim)
391 return pal_emulator_static(index);
393 // pal code must be mapped by a TR when pal is called, however
394 // calls are rare enough that we will map it lazily rather than
395 // at every context switch
396 //efi_map_pal_code();
397 switch (index) {
398 case PAL_MEM_ATTRIB:
399 status = ia64_pal_mem_attrib(&r9);
400 break;
401 case PAL_FREQ_BASE:
402 status = ia64_pal_freq_base(&r9);
403 if (status == PAL_STATUS_UNIMPLEMENTED) {
404 status = ia64_sal_freq_base(0, &r9, &r10);
405 r10 = 0;
406 }
407 break;
408 case PAL_PROC_GET_FEATURES:
409 status = ia64_pal_proc_get_features(&r9,&r10,&r11);
410 break;
411 case PAL_BUS_GET_FEATURES:
412 status = ia64_pal_bus_get_features(
413 (pal_bus_features_u_t *) &r9,
414 (pal_bus_features_u_t *) &r10,
415 (pal_bus_features_u_t *) &r11);
416 break;
417 case PAL_FREQ_RATIOS:
418 status = ia64_pal_freq_ratios(
419 (struct pal_freq_ratio *) &r9,
420 (struct pal_freq_ratio *) &r10,
421 (struct pal_freq_ratio *) &r11);
422 break;
423 case PAL_PTCE_INFO:
424 {
425 // return hard-coded xen-specific values because ptc.e
426 // is emulated on xen to always flush everything
427 // these values result in only one ptc.e instruction
428 status = 0; r9 = 0; r10 = (1L << 32) | 1L; r11 = 0;
429 }
430 break;
431 case PAL_VERSION:
432 status = ia64_pal_version(
433 (pal_version_u_t *) &r9,
434 (pal_version_u_t *) &r10);
435 break;
436 case PAL_VM_PAGE_SIZE:
437 status = ia64_pal_vm_page_size(&r9,&r10);
438 break;
439 case PAL_DEBUG_INFO:
440 status = ia64_pal_debug_info(&r9,&r10);
441 break;
442 case PAL_CACHE_SUMMARY:
443 status = ia64_pal_cache_summary(&r9,&r10);
444 break;
445 case PAL_VM_SUMMARY:
446 if (VMX_DOMAIN(current)) {
447 pal_vm_info_1_u_t v1;
448 pal_vm_info_2_u_t v2;
449 status = ia64_pal_vm_summary((pal_vm_info_1_u_t *)&v1,
450 (pal_vm_info_2_u_t *)&v2);
451 v1.pal_vm_info_1_s.max_itr_entry = NITRS - 1;
452 v1.pal_vm_info_1_s.max_dtr_entry = NDTRS - 1;
453 v2.pal_vm_info_2_s.impl_va_msb -= 1;
454 v2.pal_vm_info_2_s.rid_size =
455 current->domain->arch.rid_bits;
456 r9 = v1.pvi1_val;
457 r10 = v2.pvi2_val;
458 } else {
459 /* Use xen-specific values.
460 hash_tag_id is somewhat random! */
461 static const pal_vm_info_1_u_t v1 =
462 {.pal_vm_info_1_s =
463 { .vw = 1,
464 .phys_add_size = 44,
465 .key_size = 16,
466 .max_pkr = 15,
467 .hash_tag_id = 0x30,
468 .max_dtr_entry = NDTRS - 1,
469 .max_itr_entry = NITRS - 1,
470 #ifdef VHPT_GLOBAL
471 .max_unique_tcs = 3,
472 .num_tc_levels = 2
473 #else
474 .max_unique_tcs = 2,
475 .num_tc_levels = 1
476 #endif
477 }};
478 pal_vm_info_2_u_t v2;
479 v2.pvi2_val = 0;
480 v2.pal_vm_info_2_s.rid_size =
481 current->domain->arch.rid_bits;
482 v2.pal_vm_info_2_s.impl_va_msb = 50;
483 r9 = v1.pvi1_val;
484 r10 = v2.pvi2_val;
485 status = PAL_STATUS_SUCCESS;
486 }
487 break;
488 case PAL_VM_INFO:
489 if (VMX_DOMAIN(current)) {
490 status = ia64_pal_vm_info(in1, in2,
491 (pal_tc_info_u_t *)&r9, &r10);
492 break;
493 }
494 #ifdef VHPT_GLOBAL
495 if (in1 == 0 && in2 == 2) {
496 /* Level 1: VHPT */
497 const pal_tc_info_u_t v =
498 { .pal_tc_info_s = {.num_sets = 128,
499 .associativity = 1,
500 .num_entries = 128,
501 .pf = 1,
502 .unified = 1,
503 .reduce_tr = 0,
504 .reserved = 0}};
505 r9 = v.pti_val;
506 /* Only support PAGE_SIZE tc. */
507 r10 = PAGE_SIZE;
508 status = PAL_STATUS_SUCCESS;
509 }
510 #endif
511 else if (
512 #ifdef VHPT_GLOBAL
513 in1 == 1 /* Level 2. */
514 #else
515 in1 == 0 /* Level 1. */
516 #endif
517 && (in2 == 1 || in2 == 2))
518 {
519 /* itlb/dtlb, 1 entry. */
520 const pal_tc_info_u_t v =
521 { .pal_tc_info_s = {.num_sets = 1,
522 .associativity = 1,
523 .num_entries = 1,
524 .pf = 1,
525 .unified = 0,
526 .reduce_tr = 0,
527 .reserved = 0}};
528 r9 = v.pti_val;
529 /* Only support PAGE_SIZE tc. */
530 r10 = PAGE_SIZE;
531 status = PAL_STATUS_SUCCESS;
532 }
533 else
534 status = PAL_STATUS_EINVAL;
535 break;
536 case PAL_RSE_INFO:
537 status = ia64_pal_rse_info(
538 &r9,
539 (pal_hints_u_t *) &r10);
540 break;
541 case PAL_REGISTER_INFO:
542 status = ia64_pal_register_info(in1, &r9, &r10);
543 break;
544 case PAL_CACHE_FLUSH:
545 /* Always call Host Pal in int=0 */
546 in2 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
548 /*
549 * Call Host PAL cache flush
550 * Clear psr.ic when call PAL_CACHE_FLUSH
551 */
552 r10 = in3;
553 status = ia64_pal_cache_flush(in1, in2, &r10, &r9);
555 if (status != 0)
556 panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
557 "status %lx", status);
559 break;
560 case PAL_PERF_MON_INFO:
561 {
562 unsigned long pm_buffer[16];
563 status = ia64_pal_perf_mon_info(
564 pm_buffer,
565 (pal_perf_mon_info_u_t *) &r9);
566 if (status != 0) {
567 while(1)
568 printk("PAL_PERF_MON_INFO fails ret=%ld\n", status);
569 break;
570 }
571 if (copy_to_user((void __user *)in1,pm_buffer,128)) {
572 while(1)
573 printk("xen_pal_emulator: PAL_PERF_MON_INFO "
574 "can't copy to user!!!!\n");
575 status = PAL_STATUS_UNIMPLEMENTED;
576 break;
577 }
578 }
579 break;
580 case PAL_CACHE_INFO:
581 {
582 pal_cache_config_info_t ci;
583 status = ia64_pal_cache_config_info(in1,in2,&ci);
584 if (status != 0) break;
585 r9 = ci.pcci_info_1.pcci1_data;
586 r10 = ci.pcci_info_2.pcci2_data;
587 }
588 break;
589 case PAL_VM_TR_READ: /* FIXME: vcpu_get_tr?? */
590 printk("PAL_VM_TR_READ NOT IMPLEMENTED, IGNORED!\n");
591 break;
592 case PAL_HALT_INFO:
593 {
594 /* 1000 cycles to enter/leave low power state,
595 consumes 10 mW, implemented and cache/TLB coherent. */
596 unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
597 | (1UL << 61) | (1UL << 60);
598 if (copy_to_user ((void *)in1, &res, sizeof (res)))
599 status = PAL_STATUS_EINVAL;
600 else
601 status = PAL_STATUS_SUCCESS;
602 }
603 break;
604 case PAL_HALT:
605 if (current->domain == dom0) {
606 printk ("Domain0 halts the machine\n");
607 console_start_sync();
608 (*efi.reset_system)(EFI_RESET_SHUTDOWN,0,0,NULL);
609 } else {
610 set_bit(_VCPUF_down, &current->vcpu_flags);
611 vcpu_sleep_nosync(current);
612 status = PAL_STATUS_SUCCESS;
613 }
614 break;
615 case PAL_HALT_LIGHT:
616 if (VMX_DOMAIN(current)) {
617 /* Called by VTI. */
618 if (!is_unmasked_irq(current)) {
619 do_sched_op_compat(SCHEDOP_block, 0);
620 do_softirq();
621 }
622 status = PAL_STATUS_SUCCESS;
623 }
624 break;
625 case PAL_PLATFORM_ADDR:
626 if (VMX_DOMAIN(current))
627 status = PAL_STATUS_SUCCESS;
628 break;
629 case PAL_LOGICAL_TO_PHYSICAL:
630 /* Optional, no need to complain about being unimplemented */
631 break;
632 default:
633 printk("xen_pal_emulator: UNIMPLEMENTED PAL CALL %lu!!!!\n",
634 index);
635 break;
636 }
637 return ((struct ia64_pal_retval) {status, r9, r10, r11});
638 }
// given a current domain (virtual or metaphysical) address, return the virtual address
//
// On success, returns a xen virtual address mapping the guest buffer and
// stores a referenced page in *page; the caller must put_page() it.
// On failure, *fault is set (IA64_NO_FAULT otherwise) and 0 is returned.
static unsigned long
efi_translate_domain_addr(unsigned long domain_addr, IA64FAULT *fault,
			  struct page_info** page)
{
	struct vcpu *v = current;
	unsigned long mpaddr = domain_addr;
	unsigned long virt;
	*fault = IA64_NO_FAULT;

 again:
	// In virtual mode the guest hands us a virtual address; translate
	// it through the vcpu's TLB to a metaphysical address first.
	if (v->domain->arch.sal_data->efi_virt_mode) {
		*fault = vcpu_tpa(v, domain_addr, &mpaddr);
		if (*fault != IA64_NO_FAULT) return 0;
	}

	virt = (unsigned long)domain_mpa_to_imva(v->domain, mpaddr);
	*page = virt_to_page(virt);
	// Take a reference so the page cannot go away while firmware
	// reads or writes it.
	if (get_page(*page, current->domain) == 0) {
		if (page_get_owner(*page) != current->domain) {
			// which code is appropriate?
			*fault = IA64_FAULT;
			return 0;
		}
		// Page is owned by this domain but the refcount grab raced
		// with something; retranslate and retry.
		goto again;
	}

	return virt;
}
/*
 * Emulate EFI GetTime for a guest: read the RTC through the firmware,
 * then shift the result by the domain's time_offset_seconds so each
 * domain can run with its own localtime.
 * tv_addr is the guest buffer for the efi_time_t; tc_addr (optional,
 * may be 0) receives the clock capabilities.
 */
static efi_status_t
efi_emulate_get_time(
	unsigned long tv_addr, unsigned long tc_addr,
	IA64FAULT *fault)
{
	unsigned long tv, tc = 0;
	struct page_info *tv_page = NULL;
	struct page_info *tc_page = NULL;
	efi_status_t status = 0;
	efi_time_t *tvp;
	struct tm timeptr;
	unsigned long xtimesec;

	tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
	if (*fault != IA64_NO_FAULT)
		goto errout;
	/* Capabilities pointer is optional; tc stays 0 (NULL) if absent. */
	if (tc_addr) {
		tc = efi_translate_domain_addr(tc_addr, fault, &tc_page);
		if (*fault != IA64_NO_FAULT)
			goto errout;
	}

	spin_lock(&efi_time_services_lock);
	status = (*efi.get_time)((efi_time_t *) tv, (efi_time_cap_t *) tc);
	/* NOTE(review): the offset adjustment below runs even when the
	   firmware call failed (status != 0) — confirm this is benign. */
	tvp = (efi_time_t *)tv;
	/* Convert firmware time to seconds, apply the per-domain offset,
	   then convert back into the guest's efi_time_t fields. */
	xtimesec = mktime(tvp->year, tvp->month, tvp->day, tvp->hour,
	                  tvp->minute, tvp->second);
	xtimesec += current->domain->time_offset_seconds;
	timeptr = gmtime(xtimesec);
	tvp->second = timeptr.tm_sec;
	tvp->minute = timeptr.tm_min;
	tvp->hour   = timeptr.tm_hour;
	tvp->day    = timeptr.tm_mday;
	tvp->month  = timeptr.tm_mon + 1;
	tvp->year   = timeptr.tm_year + 1900;
	spin_unlock(&efi_time_services_lock);

errout:
	/* Release the page references taken by the translations. */
	if (tc_page != NULL)
		put_page(tc_page);
	if (tv_page != NULL)
		put_page(tv_page);

	return status;
}
716 static efi_status_t
717 efi_emulate_set_time(
718 unsigned long tv_addr, IA64FAULT *fault)
719 {
720 unsigned long tv;
721 struct page_info *tv_page = NULL;
722 efi_status_t status = 0;
724 if (current->domain != dom0)
725 return EFI_UNSUPPORTED;
727 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
728 if (*fault != IA64_NO_FAULT)
729 goto errout;
731 spin_lock(&efi_time_services_lock);
732 status = (*efi.set_time)((efi_time_t *)tv);
733 spin_unlock(&efi_time_services_lock);
735 errout:
736 if (tv_page != NULL)
737 put_page(tv_page);
739 return status;
740 }
742 static efi_status_t
743 efi_emulate_get_wakeup_time(
744 unsigned long e_addr, unsigned long p_addr,
745 unsigned long tv_addr, IA64FAULT *fault)
746 {
747 unsigned long enabled, pending, tv;
748 struct page_info *e_page = NULL, *p_page = NULL,
749 *tv_page = NULL;
750 efi_status_t status = 0;
752 if (current->domain != dom0)
753 return EFI_UNSUPPORTED;
755 if (!e_addr || !p_addr || !tv_addr)
756 return EFI_INVALID_PARAMETER;
758 enabled = efi_translate_domain_addr(e_addr, fault, &e_page);
759 if (*fault != IA64_NO_FAULT)
760 goto errout;
761 pending = efi_translate_domain_addr(p_addr, fault, &p_page);
762 if (*fault != IA64_NO_FAULT)
763 goto errout;
764 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
765 if (*fault != IA64_NO_FAULT)
766 goto errout;
768 spin_lock(&efi_time_services_lock);
769 status = (*efi.get_wakeup_time)((efi_bool_t *)enabled,
770 (efi_bool_t *)pending,
771 (efi_time_t *)tv);
772 spin_unlock(&efi_time_services_lock);
774 errout:
775 if (e_page != NULL)
776 put_page(e_page);
777 if (p_page != NULL)
778 put_page(p_page);
779 if (tv_page != NULL)
780 put_page(tv_page);
782 return status;
783 }
785 static efi_status_t
786 efi_emulate_set_wakeup_time(
787 unsigned long enabled, unsigned long tv_addr,
788 IA64FAULT *fault)
789 {
790 unsigned long tv = 0;
791 struct page_info *tv_page = NULL;
792 efi_status_t status = 0;
794 if (current->domain != dom0)
795 return EFI_UNSUPPORTED;
797 if (tv_addr) {
798 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
799 if (*fault != IA64_NO_FAULT)
800 goto errout;
801 }
803 spin_lock(&efi_time_services_lock);
804 status = (*efi.set_wakeup_time)((efi_bool_t)enabled,
805 (efi_time_t *)tv);
806 spin_unlock(&efi_time_services_lock);
808 errout:
809 if (tv_page != NULL)
810 put_page(tv_page);
812 return status;
813 }
815 static efi_status_t
816 efi_emulate_get_variable(
817 unsigned long name_addr, unsigned long vendor_addr,
818 unsigned long attr_addr, unsigned long data_size_addr,
819 unsigned long data_addr, IA64FAULT *fault)
820 {
821 unsigned long name, vendor, attr = 0, data_size, data;
822 struct page_info *name_page = NULL, *vendor_page = NULL,
823 *attr_page = NULL, *data_size_page = NULL,
824 *data_page = NULL;
825 efi_status_t status = 0;
827 if (current->domain != dom0)
828 return EFI_UNSUPPORTED;
830 name = efi_translate_domain_addr(name_addr, fault, &name_page);
831 if (*fault != IA64_NO_FAULT)
832 goto errout;
833 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
834 if (*fault != IA64_NO_FAULT)
835 goto errout;
836 data_size = efi_translate_domain_addr(data_size_addr, fault,
837 &data_size_page);
838 if (*fault != IA64_NO_FAULT)
839 goto errout;
840 data = efi_translate_domain_addr(data_addr, fault, &data_page);
841 if (*fault != IA64_NO_FAULT)
842 goto errout;
843 if (attr_addr) {
844 attr = efi_translate_domain_addr(attr_addr, fault, &attr_page);
845 if (*fault != IA64_NO_FAULT)
846 goto errout;
847 }
849 status = (*efi.get_variable)((efi_char16_t *)name,
850 (efi_guid_t *)vendor,
851 (u32 *)attr,
852 (unsigned long *)data_size,
853 (void *)data);
855 errout:
856 if (name_page != NULL)
857 put_page(name_page);
858 if (vendor_page != NULL)
859 put_page(vendor_page);
860 if (attr_page != NULL)
861 put_page(attr_page);
862 if (data_size_page != NULL)
863 put_page(data_size_page);
864 if (data_page != NULL)
865 put_page(data_page);
867 return status;
868 }
870 static efi_status_t
871 efi_emulate_get_next_variable(
872 unsigned long name_size_addr, unsigned long name_addr,
873 unsigned long vendor_addr, IA64FAULT *fault)
874 {
875 unsigned long name_size, name, vendor;
876 struct page_info *name_size_page = NULL, *name_page = NULL,
877 *vendor_page = NULL;
878 efi_status_t status = 0;
880 if (current->domain != dom0)
881 return EFI_UNSUPPORTED;
883 name_size = efi_translate_domain_addr(name_size_addr, fault,
884 &name_size_page);
885 if (*fault != IA64_NO_FAULT)
886 goto errout;
887 name = efi_translate_domain_addr(name_addr, fault, &name_page);
888 if (*fault != IA64_NO_FAULT)
889 goto errout;
890 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
891 if (*fault != IA64_NO_FAULT)
892 goto errout;
894 status = (*efi.get_next_variable)((unsigned long *)name_size,
895 (efi_char16_t *)name,
896 (efi_guid_t *)vendor);
898 errout:
899 if (name_size_page != NULL)
900 put_page(name_size_page);
901 if (name_page != NULL)
902 put_page(name_page);
903 if (vendor_page != NULL)
904 put_page(vendor_page);
906 return status;
907 }
909 static efi_status_t
910 efi_emulate_set_variable(
911 unsigned long name_addr, unsigned long vendor_addr,
912 unsigned long attr, unsigned long data_size,
913 unsigned long data_addr, IA64FAULT *fault)
914 {
915 unsigned long name, vendor, data;
916 struct page_info *name_page = NULL, *vendor_page = NULL,
917 *data_page = NULL;
918 efi_status_t status = 0;
920 if (current->domain != dom0)
921 return EFI_UNSUPPORTED;
923 name = efi_translate_domain_addr(name_addr, fault, &name_page);
924 if (*fault != IA64_NO_FAULT)
925 goto errout;
926 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
927 if (*fault != IA64_NO_FAULT)
928 goto errout;
929 data = efi_translate_domain_addr(data_addr, fault, &data_page);
930 if (*fault != IA64_NO_FAULT)
931 goto errout;
933 status = (*efi.set_variable)((efi_char16_t *)name,
934 (efi_guid_t *)vendor,
935 attr,
936 data_size,
937 (void *)data);
939 errout:
940 if (name_page != NULL)
941 put_page(name_page);
942 if (vendor_page != NULL)
943 put_page(vendor_page);
944 if (data_page != NULL)
945 put_page(data_page);
947 return status;
948 }
950 static efi_status_t
951 efi_emulate_set_virtual_address_map(
952 unsigned long memory_map_size, unsigned long descriptor_size,
953 u32 descriptor_version, efi_memory_desc_t *virtual_map)
954 {
955 void *efi_map_start, *efi_map_end, *p;
956 efi_memory_desc_t entry, *md = &entry;
957 u64 efi_desc_size;
959 unsigned long *vfn;
960 struct domain *d = current->domain;
961 efi_runtime_services_t *efi_runtime = d->arch.efi_runtime;
962 fpswa_interface_t *fpswa_inf = d->arch.fpswa_inf;
964 if (descriptor_version != EFI_MEMDESC_VERSION) {
965 printk ("efi_emulate_set_virtual_address_map: memory "
966 "descriptor version unmatched (%d vs %d)\n",
967 (int)descriptor_version, EFI_MEMDESC_VERSION);
968 return EFI_INVALID_PARAMETER;
969 }
971 if (descriptor_size != sizeof(efi_memory_desc_t)) {
972 printk ("efi_emulate_set_virtual_address_map: memory descriptor size unmatched\n");
973 return EFI_INVALID_PARAMETER;
974 }
976 if (d->arch.sal_data->efi_virt_mode)
977 return EFI_UNSUPPORTED;
979 efi_map_start = virtual_map;
980 efi_map_end = efi_map_start + memory_map_size;
981 efi_desc_size = sizeof(efi_memory_desc_t);
983 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
984 if (copy_from_user(&entry, p, sizeof(efi_memory_desc_t))) {
985 printk ("efi_emulate_set_virtual_address_map: copy_from_user() fault. addr=0x%p\n", p);
986 return EFI_UNSUPPORTED;
987 }
989 /* skip over non-PAL_CODE memory descriptors; EFI_RUNTIME is included in PAL_CODE. */
990 if (md->type != EFI_PAL_CODE)
991 continue;
993 #define EFI_HYPERCALL_PATCH_TO_VIRT(tgt,call) \
994 do { \
995 vfn = (unsigned long *) domain_mpa_to_imva(d, tgt); \
996 *vfn++ = FW_HYPERCALL_##call##_INDEX * 16UL + md->virt_addr; \
997 *vfn++ = 0; \
998 } while (0)
1000 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_time,EFI_GET_TIME);
1001 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_time,EFI_SET_TIME);
1002 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_wakeup_time,EFI_GET_WAKEUP_TIME);
1003 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_wakeup_time,EFI_SET_WAKEUP_TIME);
1004 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_virtual_address_map,EFI_SET_VIRTUAL_ADDRESS_MAP);
1005 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_variable,EFI_GET_VARIABLE);
1006 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_variable,EFI_GET_NEXT_VARIABLE);
1007 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_variable,EFI_SET_VARIABLE);
1008 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_high_mono_count,EFI_GET_NEXT_HIGH_MONO_COUNT);
1009 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->reset_system,EFI_RESET_SYSTEM);
1011 vfn = (unsigned long *) domain_mpa_to_imva(d, (unsigned long) fpswa_inf->fpswa);
1012 *vfn++ = FW_HYPERCALL_FPSWA_PATCH_INDEX * 16UL + md->virt_addr;
1013 *vfn = 0;
1014 fpswa_inf->fpswa = (void *) (FW_HYPERCALL_FPSWA_ENTRY_INDEX * 16UL + md->virt_addr);
1015 break;
1018 /* The virtual address map has been applied. */
1019 d->arch.sal_data->efi_virt_mode = 1;
1021 return EFI_SUCCESS;
1024 efi_status_t
1025 efi_emulator (struct pt_regs *regs, IA64FAULT *fault)
1027 struct vcpu *v = current;
1028 efi_status_t status;
1030 *fault = IA64_NO_FAULT;
1032 switch (regs->r2) {
1033 case FW_HYPERCALL_EFI_RESET_SYSTEM:
1035 u8 reason;
1036 unsigned long val = vcpu_get_gr(v,32);
1037 switch (val)
1039 case EFI_RESET_SHUTDOWN:
1040 reason = SHUTDOWN_poweroff;
1041 break;
1042 case EFI_RESET_COLD:
1043 case EFI_RESET_WARM:
1044 default:
1045 reason = SHUTDOWN_reboot;
1046 break;
1048 domain_shutdown (current->domain, reason);
1050 status = EFI_UNSUPPORTED;
1051 break;
1052 case FW_HYPERCALL_EFI_GET_TIME:
1053 status = efi_emulate_get_time (
1054 vcpu_get_gr(v,32),
1055 vcpu_get_gr(v,33),
1056 fault);
1057 break;
1058 case FW_HYPERCALL_EFI_SET_TIME:
1059 status = efi_emulate_set_time (
1060 vcpu_get_gr(v,32),
1061 fault);
1062 break;
1063 case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
1064 status = efi_emulate_get_wakeup_time (
1065 vcpu_get_gr(v,32),
1066 vcpu_get_gr(v,33),
1067 vcpu_get_gr(v,34),
1068 fault);
1069 break;
1070 case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
1071 status = efi_emulate_set_wakeup_time (
1072 vcpu_get_gr(v,32),
1073 vcpu_get_gr(v,33),
1074 fault);
1075 break;
1076 case FW_HYPERCALL_EFI_GET_VARIABLE:
1077 status = efi_emulate_get_variable (
1078 vcpu_get_gr(v,32),
1079 vcpu_get_gr(v,33),
1080 vcpu_get_gr(v,34),
1081 vcpu_get_gr(v,35),
1082 vcpu_get_gr(v,36),
1083 fault);
1084 break;
1085 case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
1086 status = efi_emulate_get_next_variable (
1087 vcpu_get_gr(v,32),
1088 vcpu_get_gr(v,33),
1089 vcpu_get_gr(v,34),
1090 fault);
1091 break;
1092 case FW_HYPERCALL_EFI_SET_VARIABLE:
1093 status = efi_emulate_set_variable (
1094 vcpu_get_gr(v,32),
1095 vcpu_get_gr(v,33),
1096 vcpu_get_gr(v,34),
1097 vcpu_get_gr(v,35),
1098 vcpu_get_gr(v,36),
1099 fault);
1100 break;
1101 case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
1102 status = efi_emulate_set_virtual_address_map (
1103 vcpu_get_gr(v,32),
1104 vcpu_get_gr(v,33),
1105 (u32) vcpu_get_gr(v,34),
1106 (efi_memory_desc_t *) vcpu_get_gr(v,35));
1107 break;
1108 case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
1109 // FIXME: need fixes in efi.h from 2.6.9
1110 status = EFI_UNSUPPORTED;
1111 break;
1112 default:
1113 printk("unknown ia64 fw hypercall %lx\n", regs->r2);
1114 status = EFI_UNSUPPORTED;
1117 return status;
1120 void
1121 do_ssc(unsigned long ssc, struct pt_regs *regs)
1123 unsigned long arg0, arg1, arg2, arg3, retval;
1124 char buf[2];
1125 /**/ static int last_fd, last_count; // FIXME FIXME FIXME
1126 /**/ // BROKEN FOR MULTIPLE DOMAINS & SMP
1127 /**/ struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
1129 arg0 = vcpu_get_gr(current,32);
1130 switch(ssc) {
1131 case SSC_PUTCHAR:
1132 buf[0] = arg0;
1133 buf[1] = '\0';
1134 printk(buf);
1135 break;
1136 case SSC_GETCHAR:
1137 retval = ia64_ssc(0,0,0,0,ssc);
1138 vcpu_set_gr(current,8,retval,0);
1139 break;
1140 case SSC_WAIT_COMPLETION:
1141 if (arg0) { // metaphysical address
1143 arg0 = translate_domain_mpaddr(arg0, NULL);
1144 /**/ stat = (struct ssc_disk_stat *)__va(arg0);
1145 ///**/ if (stat->fd == last_fd) stat->count = last_count;
1146 /**/ stat->count = last_count;
1147 //if (last_count >= PAGE_SIZE) printk("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
1148 ///**/ retval = ia64_ssc(arg0,0,0,0,ssc);
1149 /**/ retval = 0;
1151 else retval = -1L;
1152 vcpu_set_gr(current,8,retval,0);
1153 break;
1154 case SSC_OPEN:
1155 arg1 = vcpu_get_gr(current,33); // access rights
1156 if (!running_on_sim) { printk("SSC_OPEN, not implemented on hardware. (ignoring...)\n"); arg0 = 0; }
1157 if (arg0) { // metaphysical address
1158 arg0 = translate_domain_mpaddr(arg0, NULL);
1159 retval = ia64_ssc(arg0,arg1,0,0,ssc);
1161 else retval = -1L;
1162 vcpu_set_gr(current,8,retval,0);
1163 break;
1164 case SSC_WRITE:
1165 case SSC_READ:
1166 //if (ssc == SSC_WRITE) printk("DOING AN SSC_WRITE\n");
1167 arg1 = vcpu_get_gr(current,33);
1168 arg2 = vcpu_get_gr(current,34);
1169 arg3 = vcpu_get_gr(current,35);
1170 if (arg2) { // metaphysical address of descriptor
1171 struct ssc_disk_req *req;
1172 unsigned long mpaddr;
1173 long len;
1175 arg2 = translate_domain_mpaddr(arg2, NULL);
1176 req = (struct ssc_disk_req *) __va(arg2);
1177 req->len &= 0xffffffffL; // avoid strange bug
1178 len = req->len;
1179 /**/ last_fd = arg1;
1180 /**/ last_count = len;
1181 mpaddr = req->addr;
1182 //if (last_count >= PAGE_SIZE) printk("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
1183 retval = 0;
1184 if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
1185 // do partial page first
1186 req->addr = translate_domain_mpaddr(mpaddr, NULL);
1187 req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
1188 len -= req->len; mpaddr += req->len;
1189 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1190 arg3 += req->len; // file offset
1191 /**/ last_stat.fd = last_fd;
1192 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
1193 //if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
1195 if (retval >= 0) while (len > 0) {
1196 req->addr = translate_domain_mpaddr(mpaddr, NULL);
1197 req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
1198 len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
1199 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1200 arg3 += req->len; // file offset
1201 // TEMP REMOVED AGAIN arg3 += req->len; // file offset
1202 /**/ last_stat.fd = last_fd;
1203 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
1204 //if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
1206 // set it back to the original value
1207 req->len = last_count;
1209 else retval = -1L;
1210 vcpu_set_gr(current,8,retval,0);
1211 //if (last_count >= PAGE_SIZE) printk("retval=%x\n",retval);
1212 break;
1213 case SSC_CONNECT_INTERRUPT:
1214 arg1 = vcpu_get_gr(current,33);
1215 arg2 = vcpu_get_gr(current,34);
1216 arg3 = vcpu_get_gr(current,35);
1217 if (!running_on_sim) { printk("SSC_CONNECT_INTERRUPT, not implemented on hardware. (ignoring...)\n"); break; }
1218 (void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1219 break;
1220 case SSC_NETDEV_PROBE:
1221 vcpu_set_gr(current,8,-1L,0);
1222 break;
1223 default:
1224 panic_domain(regs,
1225 "%s: bad ssc code %lx, iip=0x%lx, b0=0x%lx\n",
1226 __func__, ssc, regs->cr_iip, regs->b0);
1227 break;
1229 vcpu_increment_iip(current);