ia64/xen-unstable

view xen/arch/ia64/xen/fw_emul.c @ 16330:a071725bda88

[IA64] Ignore SAL_PHYSICAL_ID_INFO

Newer upstream Linux kernels call this. Simply return
unimplemented for now.

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author Alex Williamson <alex.williamson@hp.com>
date Tue Nov 06 14:20:05 2007 -0700 (2007-11-06)
parents 41c1731c9125
children e6069a715fd7
line source
1 /*
2 * fw_emul.c:
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 */
18 #include <xen/config.h>
19 #include <asm/system.h>
20 #include <asm/pgalloc.h>
22 #include <linux/efi.h>
23 #include <asm/pal.h>
24 #include <asm/sal.h>
25 #include <asm/sn/sn_sal.h>
26 #include <asm/sn/hubdev.h>
27 #include <asm/xenmca.h>
29 #include <public/sched.h>
30 #include "hpsim_ssc.h"
31 #include <asm/vcpu.h>
32 #include <asm/vmx_vcpu.h>
33 #include <asm/dom_fw.h>
34 #include <asm/uaccess.h>
35 #include <xen/console.h>
36 #include <xen/hypercall.h>
37 #include <xen/softirq.h>
38 #include <xen/time.h>
39 #include <asm/debugger.h>
/* Serializes all EFI time-service calls emulated below. */
static DEFINE_SPINLOCK(efi_time_services_lock);

/* Machine-check parameters registered by the guest via SAL_MC_SET_PARAMS,
   one slot per parameter type. */
struct sal_mc_params {
	u64 param_type;
	u64 i_or_m;
	u64 i_or_m_val;
	u64 timeout;
	u64 rz_always;
} sal_mc_params[SAL_MC_PARAM_CPE_INT + 1];

/* OS handler vectors registered by the guest via SAL_SET_VECTORS,
   one slot per vector type. */
struct sal_vectors {
	u64 vector_type;
	u64 handler_addr1;
	u64 gp1;
	u64 handler_len1;
	u64 handler_addr2;
	u64 gp2;
	u64 handler_len2;
} sal_vectors[SAL_VECTOR_OS_BOOT_RENDEZ + 1];

/* Argument bundle handed to get_state_info_on()/clear_state_info_on(),
   possibly on a remote CPU via smp_call_function_single(). */
struct smp_call_args_t {
	u64 type;		/* SAL_INFO_TYPE_* record type */
	u64 ret;		/* record size returned by SAL; 0 on failure */
	u64 target;		/* guest buffer (xencomm handle) for the record */
	struct domain *domain;	/* domain owning the target buffer */
	int corrected;		/* force record severity to "corrected" */
	int status;		/* SAL status reported back to the guest */
	void *data;
};

extern sal_log_record_header_t *sal_record;
/* Protects the shared sal_record staging buffer. */
DEFINE_SPINLOCK(sal_record_lock);

extern spinlock_t sal_queue_lock;

#define IA64_SAL_NO_INFORMATION_AVAILABLE	-5

#if defined(IA64_SAL_DEBUG_INFO)
static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };

# define IA64_SAL_DEBUG(fmt...)	printk("sal_emulator: " fmt)
#else
# define IA64_SAL_DEBUG(fmt...)
#endif
86 void get_state_info_on(void *data) {
87 struct smp_call_args_t *arg = data;
88 int flags;
90 spin_lock_irqsave(&sal_record_lock, flags);
91 memset(sal_record, 0, ia64_sal_get_state_info_size(arg->type));
92 arg->ret = ia64_sal_get_state_info(arg->type, (u64 *)sal_record);
93 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) on CPU#%d returns %ld.\n",
94 rec_name[arg->type], smp_processor_id(), arg->ret);
95 if (arg->corrected) {
96 sal_record->severity = sal_log_severity_corrected;
97 IA64_SAL_DEBUG("%s: IA64_SAL_CLEAR_STATE_INFO(SAL_INFO_TYPE_MCA)"
98 " force\n", __FUNCTION__);
99 }
100 if (arg->ret > 0) {
101 /*
102 * Save current->domain and set to local(caller) domain for
103 * xencomm_paddr_to_maddr() which calculates maddr from
104 * paddr using mpa value of current->domain.
105 */
106 struct domain *save;
107 save = current->domain;
108 current->domain = arg->domain;
109 if (xencomm_copy_to_guest((void*)arg->target,
110 sal_record, arg->ret, 0)) {
111 printk("SAL_GET_STATE_INFO can't copy to user!!!!\n");
112 arg->status = IA64_SAL_NO_INFORMATION_AVAILABLE;
113 arg->ret = 0;
114 }
115 /* Restore current->domain to saved value. */
116 current->domain = save;
117 }
118 spin_unlock_irqrestore(&sal_record_lock, flags);
119 }
121 void clear_state_info_on(void *data) {
122 struct smp_call_args_t *arg = data;
124 arg->ret = ia64_sal_clear_state_info(arg->type);
125 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) on CPU#%d returns %ld.\n",
126 rec_name[arg->type], smp_processor_id(), arg->ret);
128 }
130 struct sal_ret_values
131 sal_emulator (long index, unsigned long in1, unsigned long in2,
132 unsigned long in3, unsigned long in4, unsigned long in5,
133 unsigned long in6, unsigned long in7)
134 {
135 struct ia64_sal_retval ret_stuff;
136 unsigned long r9 = 0;
137 unsigned long r10 = 0;
138 long r11 = 0;
139 long status;
141 debugger_event(XEN_IA64_DEBUG_ON_SAL);
143 status = 0;
144 switch (index) {
145 case SAL_FREQ_BASE:
146 if (likely(!running_on_sim))
147 status = ia64_sal_freq_base(in1,&r9,&r10);
148 else switch (in1) {
149 case SAL_FREQ_BASE_PLATFORM:
150 r9 = 200000000;
151 break;
153 case SAL_FREQ_BASE_INTERVAL_TIMER:
154 r9 = 700000000;
155 break;
157 case SAL_FREQ_BASE_REALTIME_CLOCK:
158 r9 = 1;
159 break;
161 default:
162 status = -1;
163 break;
164 }
165 break;
166 case SAL_PCI_CONFIG_READ:
167 if (current->domain == dom0) {
168 u64 value;
169 // note that args 2&3 are swapped!!
170 status = ia64_sal_pci_config_read(in1,in3,in2,&value);
171 r9 = value;
172 }
173 else
174 printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_READ\n");
175 break;
176 case SAL_PCI_CONFIG_WRITE:
177 if (current->domain == dom0) {
178 if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
179 (in4 > 1) ||
180 (in2 > 8) || (in2 & (in2-1)))
181 printk("*** SAL_PCI_CONF_WRITE?!?(adr=0x%lx,typ=0x%lx,sz=0x%lx,val=0x%lx)\n",
182 in1,in4,in2,in3);
183 // note that args are in a different order!!
184 status = ia64_sal_pci_config_write(in1,in4,in2,in3);
185 }
186 else
187 printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_WRITE\n");
188 break;
189 case SAL_SET_VECTORS:
190 if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
191 if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
192 /* Sanity check: cs_length1 must be 0,
193 second vector is reserved. */
194 status = -2;
195 }
196 else {
197 struct domain *d = current->domain;
198 d->arch.sal_data->boot_rdv_ip = in2;
199 d->arch.sal_data->boot_rdv_r1 = in3;
200 }
201 }
202 else
203 {
204 if (in1 > sizeof(sal_vectors)/sizeof(sal_vectors[0])-1)
205 BUG();
206 sal_vectors[in1].vector_type = in1;
207 sal_vectors[in1].handler_addr1 = in2;
208 sal_vectors[in1].gp1 = in3;
209 sal_vectors[in1].handler_len1 = in4;
210 sal_vectors[in1].handler_addr2 = in5;
211 sal_vectors[in1].gp2 = in6;
212 sal_vectors[in1].handler_len2 = in7;
213 }
214 break;
215 case SAL_GET_STATE_INFO:
216 if (current->domain == dom0) {
217 sal_queue_entry_t *e;
218 unsigned long flags;
219 struct smp_call_args_t arg;
221 spin_lock_irqsave(&sal_queue_lock, flags);
222 if (!sal_queue || list_empty(&sal_queue[in1])) {
223 sal_log_record_header_t header;
224 XEN_GUEST_HANDLE(void) handle =
225 *(XEN_GUEST_HANDLE(void)*)&in3;
227 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) "
228 "no sal_queue entry found.\n",
229 rec_name[in1]);
230 memset(&header, 0, sizeof(header));
232 if (copy_to_guest(handle, &header, 1)) {
233 printk("sal_emulator: "
234 "SAL_GET_STATE_INFO can't copy "
235 "empty header to user: 0x%lx\n",
236 in3);
237 }
238 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
239 r9 = 0;
240 spin_unlock_irqrestore(&sal_queue_lock, flags);
241 break;
242 }
243 e = list_entry(sal_queue[in1].next,
244 sal_queue_entry_t, list);
246 list_del(&e->list);
247 spin_unlock_irqrestore(&sal_queue_lock, flags);
249 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s <= %s) "
250 "on CPU#%d.\n",
251 rec_name[e->sal_info_type],
252 rec_name[in1], e->cpuid);
254 arg.type = e->sal_info_type;
255 arg.target = in3;
256 arg.corrected = !!((in1 != e->sal_info_type) &&
257 (e->sal_info_type == SAL_INFO_TYPE_MCA));
258 arg.domain = current->domain;
259 arg.status = 0;
261 if (e->cpuid == smp_processor_id()) {
262 IA64_SAL_DEBUG("SAL_GET_STATE_INFO: local\n");
263 get_state_info_on(&arg);
264 } else {
265 int ret;
266 IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
267 ret = smp_call_function_single(e->cpuid,
268 get_state_info_on,
269 &arg, 0, 1);
270 if (ret < 0) {
271 printk("SAL_GET_STATE_INFO "
272 "smp_call_function_single error:"
273 " %d\n", ret);
274 arg.ret = 0;
275 arg.status =
276 IA64_SAL_NO_INFORMATION_AVAILABLE;
277 }
278 }
279 r9 = arg.ret;
280 status = arg.status;
281 if (r9 == 0) {
282 xfree(e);
283 } else {
284 /* Re-add the entry to sal_queue */
285 spin_lock_irqsave(&sal_queue_lock, flags);
286 list_add(&e->list, &sal_queue[in1]);
287 spin_unlock_irqrestore(&sal_queue_lock, flags);
288 }
289 } else {
290 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
291 r9 = 0;
292 }
293 break;
294 case SAL_GET_STATE_INFO_SIZE:
295 r9 = ia64_sal_get_state_info_size(in1);
296 break;
297 case SAL_CLEAR_STATE_INFO:
298 if (current->domain == dom0) {
299 sal_queue_entry_t *e;
300 unsigned long flags;
301 struct smp_call_args_t arg;
303 spin_lock_irqsave(&sal_queue_lock, flags);
304 if (list_empty(&sal_queue[in1])) {
305 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) "
306 "no sal_queue entry found.\n",
307 rec_name[in1]);
308 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
309 r9 = 0;
310 spin_unlock_irqrestore(&sal_queue_lock, flags);
311 break;
312 }
313 e = list_entry(sal_queue[in1].next,
314 sal_queue_entry_t, list);
316 list_del(&e->list);
317 spin_unlock_irqrestore(&sal_queue_lock, flags);
319 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s <= %s) "
320 "on CPU#%d.\n",
321 rec_name[e->sal_info_type],
322 rec_name[in1], e->cpuid);
324 arg.type = e->sal_info_type;
325 arg.status = 0;
327 if (e->cpuid == smp_processor_id()) {
328 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: local\n");
329 clear_state_info_on(&arg);
330 } else {
331 int ret;
332 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: remote\n");
333 ret = smp_call_function_single(e->cpuid,
334 clear_state_info_on, &arg, 0, 1);
335 if (ret < 0) {
336 printk("sal_emulator: "
337 "SAL_CLEAR_STATE_INFO "
338 "smp_call_function_single error:"
339 " %d\n", ret);
340 arg.ret = 0;
341 arg.status =
342 IA64_SAL_NO_INFORMATION_AVAILABLE;
343 }
344 }
345 r9 = arg.ret;
346 status = arg.status;
347 xfree(e);
348 }
349 break;
350 case SAL_MC_RENDEZ:
351 printk("*** CALLED SAL_MC_RENDEZ. IGNORED...\n");
352 break;
353 case SAL_MC_SET_PARAMS:
354 if (in1 > sizeof(sal_mc_params)/sizeof(sal_mc_params[0]))
355 BUG();
356 sal_mc_params[in1].param_type = in1;
357 sal_mc_params[in1].i_or_m = in2;
358 sal_mc_params[in1].i_or_m_val = in3;
359 sal_mc_params[in1].timeout = in4;
360 sal_mc_params[in1].rz_always = in5;
361 break;
362 case SAL_CACHE_FLUSH:
363 if (1) {
364 /* Flush using SAL.
365 This method is faster but has a side effect on
366 other vcpu running on this cpu. */
367 status = ia64_sal_cache_flush (in1);
368 }
369 else {
370 /* Flush with fc all the domain.
371 This method is slower but has no side effects. */
372 domain_cache_flush (current->domain, in1 == 4 ? 1 : 0);
373 status = 0;
374 }
375 break;
376 case SAL_CACHE_INIT:
377 printk("*** CALLED SAL_CACHE_INIT. IGNORED...\n");
378 break;
379 case SAL_UPDATE_PAL:
380 printk("*** CALLED SAL_UPDATE_PAL. IGNORED...\n");
381 break;
382 case SAL_PHYSICAL_ID_INFO:
383 status = -1;
384 break;
385 case SAL_XEN_SAL_RETURN:
386 if (!test_and_set_bit(_VPF_down, &current->pause_flags))
387 vcpu_sleep_nosync(current);
388 break;
389 case SN_SAL_GET_MASTER_NASID:
390 status = -1;
391 if (current->domain == dom0) {
392 /* printk("*** Emulating SN_SAL_GET_MASTER_NASID ***\n"); */
393 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_MASTER_NASID,
394 0, 0, 0, 0, 0, 0, 0);
395 status = ret_stuff.status;
396 r9 = ret_stuff.v0;
397 r10 = ret_stuff.v1;
398 r11 = ret_stuff.v2;
399 }
400 break;
401 case SN_SAL_GET_KLCONFIG_ADDR:
402 status = -1;
403 if (current->domain == dom0) {
404 /* printk("*** Emulating SN_SAL_GET_KLCONFIG_ADDR ***\n"); */
405 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR,
406 in1, 0, 0, 0, 0, 0, 0);
407 status = ret_stuff.status;
408 r9 = ret_stuff.v0;
409 r10 = ret_stuff.v1;
410 r11 = ret_stuff.v2;
411 }
412 break;
413 case SN_SAL_GET_SAPIC_INFO:
414 status = -1;
415 if (current->domain == dom0) {
416 /* printk("*** Emulating SN_SAL_GET_SAPIC_INFO ***\n"); */
417 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO,
418 in1, 0, 0, 0, 0, 0, 0);
419 status = ret_stuff.status;
420 r9 = ret_stuff.v0;
421 r10 = ret_stuff.v1;
422 r11 = ret_stuff.v2;
423 }
424 break;
425 case SN_SAL_GET_SN_INFO:
426 status = -1;
427 if (current->domain == dom0) {
428 /* printk("*** Emulating SN_SAL_GET_SN_INFO ***\n"); */
429 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO,
430 in1, 0, 0, 0, 0, 0, 0);
431 status = ret_stuff.status;
432 r9 = ret_stuff.v0;
433 r10 = ret_stuff.v1;
434 r11 = ret_stuff.v2;
435 }
436 break;
437 case SN_SAL_IOIF_GET_HUBDEV_INFO:
438 status = -1;
439 if (current->domain == dom0) {
440 /* printk("*** Emulating SN_SAL_IOIF_GET_HUBDEV_INFO ***\n"); */
441 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_IOIF_GET_HUBDEV_INFO,
442 in1, in2, 0, 0, 0, 0, 0);
443 status = ret_stuff.status;
444 r9 = ret_stuff.v0;
445 r10 = ret_stuff.v1;
446 r11 = ret_stuff.v2;
447 }
448 break;
449 case SN_SAL_IOIF_INIT:
450 status = -1;
451 if (current->domain == dom0) {
452 /* printk("*** Emulating SN_SAL_IOIF_INIT ***\n"); */
453 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_IOIF_INIT,
454 0, 0, 0, 0, 0, 0, 0);
455 status = ret_stuff.status;
456 r9 = ret_stuff.v0;
457 r10 = ret_stuff.v1;
458 r11 = ret_stuff.v2;
459 }
460 break;
461 case SN_SAL_GET_PROM_FEATURE_SET:
462 status = -1;
463 if (current->domain == dom0) {
464 /* printk("*** Emulating SN_SAL_GET_PROM_FEATURE_SET ***\n"); */
465 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_PROM_FEATURE_SET,
466 in1, 0, 0, 0, 0, 0, 0);
467 status = ret_stuff.status;
468 r9 = ret_stuff.v0;
469 r10 = ret_stuff.v1;
470 r11 = ret_stuff.v2;
471 }
472 break;
473 case SN_SAL_SET_OS_FEATURE_SET:
474 status = -1;
475 if (current->domain == dom0) {
476 /* printk("*** Emulating SN_SAL_SET_OS_FEATURE_SET ***\n"); */
477 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SET_OS_FEATURE_SET,
478 in1, 0, 0, 0, 0, 0, 0);
479 status = ret_stuff.status;
480 r9 = ret_stuff.v0;
481 r10 = ret_stuff.v1;
482 r11 = ret_stuff.v2;
483 }
484 break;
485 case SN_SAL_SET_ERROR_HANDLING_FEATURES:
486 status = -1;
487 if (current->domain == dom0) {
488 /* printk("*** Emulating SN_SAL_SET_ERROR_HANDLING_FEATURES ***\n"); */
489 SAL_CALL_NOLOCK(ret_stuff,
490 SN_SAL_SET_ERROR_HANDLING_FEATURES,
491 in1, 0, 0, 0, 0, 0, 0);
492 status = ret_stuff.status;
493 r9 = ret_stuff.v0;
494 r10 = ret_stuff.v1;
495 r11 = ret_stuff.v2;
496 }
497 break;
498 #if 0
499 /*
500 * Somehow ACPI breaks if allowing this one
501 */
502 case SN_SAL_SET_CPU_NUMBER:
503 status = -1;
504 if (current->domain == dom0) {
505 printk("*** Emulating SN_SAL_SET_CPU_NUMBER ***\n");
506 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SET_CPU_NUMBER,
507 in1, 0, 0, 0, 0, 0, 0);
508 status = ret_stuff.status;
509 r9 = ret_stuff.v0;
510 r10 = ret_stuff.v1;
511 r11 = ret_stuff.v2;
512 }
513 break;
514 #endif
515 case SN_SAL_LOG_CE:
516 status = -1;
517 if (current->domain == dom0) {
518 static int log_ce = 0;
519 if (!log_ce) {
520 printk("*** Emulating SN_SAL_LOG_CE *** "
521 " this will only be printed once\n");
522 log_ce = 1;
523 }
524 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE,
525 0, 0, 0, 0, 0, 0, 0);
526 status = ret_stuff.status;
527 r9 = ret_stuff.v0;
528 r10 = ret_stuff.v1;
529 r11 = ret_stuff.v2;
530 }
531 break;
532 case SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST:
533 status = -1;
534 if (current->domain == dom0) {
535 struct sn_flush_device_common flush;
536 int flush_size;
538 flush_size = sizeof(struct sn_flush_device_common);
539 memset(&flush, 0, flush_size);
540 SAL_CALL_NOLOCK(ret_stuff,
541 SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
542 in1, in2, in3, &flush, 0, 0, 0);
543 #if 0
544 printk("*** Emulating "
545 "SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST ***\n");
546 #endif
547 if (ret_stuff.status == SALRET_OK) {
548 XEN_GUEST_HANDLE(void) handle =
549 *(XEN_GUEST_HANDLE(void)*)&in4;
550 if (copy_to_guest(handle, &flush, 1)) {
551 printk("SN_SAL_IOIF_GET_DEVICE_"
552 "DMAFLUSH_LIST can't copy "
553 "to user!\n");
554 ret_stuff.status = SALRET_ERROR;
555 }
556 }
558 status = ret_stuff.status;
559 r9 = ret_stuff.v0;
560 r10 = ret_stuff.v1;
561 r11 = ret_stuff.v2;
562 }
563 break;
564 default:
565 printk("*** CALLED SAL_ WITH UNKNOWN INDEX (%lx). "
566 "IGNORED...\n", index);
567 status = -1;
568 break;
569 }
570 return ((struct sal_ret_values) {status, r9, r10, r11});
571 }
/* CPUs whose caches have been made coherent by a global
   PAL_CACHE_FLUSH(COHERENT); the flushing CPU is cleared from the map. */
cpumask_t cpu_cache_coherent_map;

/* Arguments for remote_pal_cache_flush(), broadcast via
   smp_call_function(). */
struct cache_flush_args {
	u64 cache_type;	/* PAL cache type selector */
	u64 operation;
	u64 progress;	/* PAL progress indicator; starts at 0 */
	long status;	/* last non-zero PAL status seen on any CPU */
};
582 static void
583 remote_pal_cache_flush(void *v)
584 {
585 struct cache_flush_args *args = v;
586 long status;
587 u64 progress = args->progress;
589 status = ia64_pal_cache_flush(args->cache_type, args->operation,
590 &progress, NULL);
591 if (status != 0)
592 args->status = status;
593 }
595 static void
596 remote_pal_prefetch_visibility(void *v)
597 {
598 s64 trans_type = (s64)v;
599 ia64_pal_prefetch_visibility(trans_type);
600 }
/* smp_call_function() callback: run PAL_MC_DRAIN on this CPU
   (argument unused). */
static void
remote_pal_mc_drain(void *v)
{
	ia64_pal_mc_drain();
}
/*
 * Emulate a PAL (Processor Abstraction Layer) procedure call made by a
 * guest.  "index" selects the PAL function; in1..in3 are its arguments.
 * Returns the PAL quadruple (status, v0, v1, v2); status defaults to
 * PAL_STATUS_UNIMPLEMENTED for indices not handled below.  On the
 * simulator the static PAL emulator is used instead.
 */
struct ia64_pal_retval
xen_pal_emulator(unsigned long index, u64 in1, u64 in2, u64 in3)
{
	unsigned long r9 = 0;
	unsigned long r10 = 0;
	unsigned long r11 = 0;
	long status = PAL_STATUS_UNIMPLEMENTED;
	unsigned long flags;
	int processor;

	if (unlikely(running_on_sim))
		return pal_emulator_static(index);

	debugger_event(XEN_IA64_DEBUG_ON_PAL);

	// pal code must be mapped by a TR when pal is called, however
	// calls are rare enough that we will map it lazily rather than
	// at every context switch
	//efi_map_pal_code();
	switch (index) {
	case PAL_MEM_ATTRIB:
		status = ia64_pal_mem_attrib(&r9);
		break;
	case PAL_FREQ_BASE:
		status = ia64_pal_freq_base(&r9);
		/* Some platforms only report the base frequency via SAL. */
		if (status == PAL_STATUS_UNIMPLEMENTED) {
			status = ia64_sal_freq_base(0, &r9, &r10);
			r10 = 0;
		}
		break;
	case PAL_PROC_GET_FEATURES:
		status = ia64_pal_proc_get_features(&r9,&r10,&r11);
		break;
	case PAL_BUS_GET_FEATURES:
		status = ia64_pal_bus_get_features(
				(pal_bus_features_u_t *) &r9,
				(pal_bus_features_u_t *) &r10,
				(pal_bus_features_u_t *) &r11);
		break;
	case PAL_FREQ_RATIOS:
		status = ia64_pal_freq_ratios(
				(struct pal_freq_ratio *) &r9,
				(struct pal_freq_ratio *) &r10,
				(struct pal_freq_ratio *) &r11);
		break;
	case PAL_PTCE_INFO:
		/*
		 * return hard-coded xen-specific values because ptc.e
		 * is emulated on xen to always flush everything
		 * these values result in only one ptc.e instruction
		 */
		status = PAL_STATUS_SUCCESS;
		r10 = (1L << 32) | 1L;
		break;
	case PAL_VERSION:
		status = ia64_pal_version(
				(pal_version_u_t *) &r9,
				(pal_version_u_t *) &r10);
		break;
	case PAL_VM_PAGE_SIZE:
		status = ia64_pal_vm_page_size(&r9,&r10);
		break;
	case PAL_DEBUG_INFO:
		status = ia64_pal_debug_info(&r9,&r10);
		break;
	case PAL_CACHE_SUMMARY:
		status = ia64_pal_cache_summary(&r9,&r10);
		break;
	case PAL_VM_SUMMARY:
		if (VMX_DOMAIN(current)) {
			/* VTi domain: start from the real values but clamp
			   the TR counts, vaddr width and RID size to what
			   is virtualized for the guest. */
			pal_vm_info_1_u_t v1;
			pal_vm_info_2_u_t v2;
			status = ia64_pal_vm_summary((pal_vm_info_1_u_t *)&v1,
			                             (pal_vm_info_2_u_t *)&v2);
			v1.pal_vm_info_1_s.max_itr_entry = NITRS - 1;
			v1.pal_vm_info_1_s.max_dtr_entry = NDTRS - 1;
			v2.pal_vm_info_2_s.impl_va_msb -= 1;
			v2.pal_vm_info_2_s.rid_size =
				current->domain->arch.rid_bits;
			r9 = v1.pvi1_val;
			r10 = v2.pvi2_val;
		} else {
			/* Use xen-specific values.
			   hash_tag_id is somewhat random! */
			static const pal_vm_info_1_u_t v1 =
				{.pal_vm_info_1_s =
				 { .vw = 1,
				   .phys_add_size = 44,
				   .key_size = 16,
				   .max_pkr = XEN_IA64_NPKRS,
				   .hash_tag_id = 0x30,
				   .max_dtr_entry = NDTRS - 1,
				   .max_itr_entry = NITRS - 1,
				   .max_unique_tcs = 3,
				   .num_tc_levels = 2
				 }};
			pal_vm_info_2_u_t v2;
			v2.pvi2_val = 0;
			v2.pal_vm_info_2_s.rid_size =
				current->domain->arch.rid_bits;
			v2.pal_vm_info_2_s.impl_va_msb = 50;
			r9 = v1.pvi1_val;
			r10 = v2.pvi2_val;
			status = PAL_STATUS_SUCCESS;
		}
		break;
	case PAL_VM_INFO:
		if (VMX_DOMAIN(current)) {
			/* VTi domain: pass through to real PAL. */
			status = ia64_pal_vm_info(in1, in2,
			                          (pal_tc_info_u_t *)&r9, &r10);
			break;
		}
		if (in1 == 0 && in2 == 2) {
			/* Level 1: VHPT */
			const pal_tc_info_u_t v =
				{ .pal_tc_info_s = {.num_sets = 128,
				                    .associativity = 1,
				                    .num_entries = 128,
				                    .pf = 1,
				                    .unified = 1,
				                    .reduce_tr = 0,
				                    .reserved = 0}};
			r9 = v.pti_val;
			/* Only support PAGE_SIZE tc. */
			r10 = PAGE_SIZE;
			status = PAL_STATUS_SUCCESS;
		}
		else if (in1 == 1 && (in2 == 1 || in2 == 2)) {
			/* Level 2: itlb/dtlb, 1 entry. */
			const pal_tc_info_u_t v =
				{ .pal_tc_info_s = {.num_sets = 1,
				                    .associativity = 1,
				                    .num_entries = 1,
				                    .pf = 1,
				                    .unified = 0,
				                    .reduce_tr = 0,
				                    .reserved = 0}};
			r9 = v.pti_val;
			/* Only support PAGE_SIZE tc. */
			r10 = PAGE_SIZE;
			status = PAL_STATUS_SUCCESS;
		} else
			status = PAL_STATUS_EINVAL;
		break;
	case PAL_RSE_INFO:
		status = ia64_pal_rse_info(&r9, (pal_hints_u_t *)&r10);
		break;
	case PAL_REGISTER_INFO:
		status = ia64_pal_register_info(in1, &r9, &r10);
		break;
	case PAL_CACHE_FLUSH:
		if (in3 != 0) /* Initially progress_indicator must be 0 */
			panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
			             "progress_indicator=%lx", in3);

		/* Always call Host Pal in int=0 */
		in2 &= ~PAL_CACHE_FLUSH_CHK_INTRS;

		/* Non-coherent flush: every CPU must flush its own
		   caches, so broadcast to the others first. */
		if (in1 != PAL_CACHE_TYPE_COHERENT) {
			struct cache_flush_args args = {
				.cache_type = in1,
				.operation = in2,
				.progress = 0,
				.status = 0
			};
			smp_call_function(remote_pal_cache_flush,
			                  (void *)&args, 1, 1);
			if (args.status != 0)
				panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
				             "remote status %lx", args.status);
		}

		/*
		 * Call Host PAL cache flush
		 * Clear psr.ic when call PAL_CACHE_FLUSH
		 */
		r10 = in3;
		local_irq_save(flags);
		processor = current->processor;
		status = ia64_pal_cache_flush(in1, in2, &r10, &r9);
		local_irq_restore(flags);

		if (status != 0)
			panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
			             "status %lx", status);

		/* Coherent flush: remember every other CPU still needs
		   one; this CPU is done. */
		if (in1 == PAL_CACHE_TYPE_COHERENT) {
			cpus_setall(current->arch.cache_coherent_map);
			cpu_clear(processor, current->arch.cache_coherent_map);
			cpus_setall(cpu_cache_coherent_map);
			cpu_clear(processor, cpu_cache_coherent_map);
		}
		break;
	case PAL_PERF_MON_INFO:
		{
			unsigned long pm_buffer[16];
			status = ia64_pal_perf_mon_info(
					pm_buffer,
					(pal_perf_mon_info_u_t *) &r9);
			if (status != 0) {
				/* NOTE(review): deliberately hangs here,
				   spamming the console — looks like a
				   loud-failure debug aid; confirm intent. */
				while(1)
					printk("PAL_PERF_MON_INFO fails ret=%ld\n", status);
				break;
			}
			if (copy_to_user((void __user *)in1,pm_buffer,128)) {
				/* NOTE(review): same deliberate hang. */
				while(1)
					printk("xen_pal_emulator: PAL_PERF_MON_INFO "
					       "can't copy to user!!!!\n");
				status = PAL_STATUS_UNIMPLEMENTED;
				break;
			}
		}
		break;
	case PAL_CACHE_INFO:
		{
			pal_cache_config_info_t ci;
			status = ia64_pal_cache_config_info(in1,in2,&ci);
			if (status != 0)
				break;
			r9 = ci.pcci_info_1.pcci1_data;
			r10 = ci.pcci_info_2.pcci2_data;
		}
		break;
	case PAL_VM_TR_READ:	/* FIXME: vcpu_get_tr?? */
		printk("%s: PAL_VM_TR_READ unimplmented, ignored\n", __func__);
		break;
	case PAL_HALT_INFO:
		{
			/* 1000 cycles to enter/leave low power state,
			   consumes 10 mW, implemented and cache/TLB coherent. */
			unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
				| (1UL << 61) | (1UL << 60);
			if (copy_to_user ((void *)in1, &res, sizeof (res)))
				status = PAL_STATUS_EINVAL;
			else
				status = PAL_STATUS_SUCCESS;
		}
		break;
	case PAL_HALT:
		/* Take the vcpu offline and deschedule it. */
		set_bit(_VPF_down, &current->pause_flags);
		vcpu_sleep_nosync(current);
		status = PAL_STATUS_SUCCESS;
		break;
	case PAL_HALT_LIGHT:
		if (VMX_DOMAIN(current)) {
			/* Called by VTI. */
			if (!is_unmasked_irq(current)) {
				do_sched_op_compat(SCHEDOP_block, 0);
				do_softirq();
			}
			status = PAL_STATUS_SUCCESS;
		}
		break;
	case PAL_PLATFORM_ADDR:
		if (VMX_DOMAIN(current))
			status = PAL_STATUS_SUCCESS;
		break;
	case PAL_FIXED_ADDR:
		status = PAL_STATUS_SUCCESS;
		r9 = current->vcpu_id;
		break;
	case PAL_PREFETCH_VISIBILITY:
		status = ia64_pal_prefetch_visibility(in1);
		if (status == 0) {
			/* must be performed on all remote processors
			   in the coherence domain. */
			smp_call_function(remote_pal_prefetch_visibility,
			                  (void *)in1, 1, 1);
			status = 1; /* no more necessary on remote processor */
		}
		break;
	case PAL_MC_DRAIN:
		status = ia64_pal_mc_drain();
		/* FIXME: All vcpus likely call PAL_MC_DRAIN.
		   That causes the congestion. */
		smp_call_function(remote_pal_mc_drain, NULL, 1, 1);
		break;
	case PAL_BRAND_INFO:
		if (in1 == 0) {
			char brand_info[128];
			status = ia64_pal_get_brand_info(brand_info);
			if (status == PAL_STATUS_SUCCESS)
				copy_to_user((void *)in2, brand_info, 128);
		} else {
			status = PAL_STATUS_EINVAL;
		}
		break;
	case PAL_LOGICAL_TO_PHYSICAL:
	case PAL_GET_PSTATE:
	case PAL_CACHE_SHARED_INFO:
		/* Optional, no need to complain about being unimplemented */
		break;
	default:
		printk("%s: Unimplemented PAL Call %lu\n", __func__, index);
		break;
	}
	return ((struct ia64_pal_retval) {status, r9, r10, r11});
}
// given a current domain (virtual or metaphysical) address, return the virtual address
/*
 * Translates domain_addr to a Xen-mapped virtual address and takes a
 * reference on the backing page (caller must put_page(*page) when done).
 * On failure sets *fault and returns 0 without holding a reference.
 */
static unsigned long
efi_translate_domain_addr(unsigned long domain_addr, IA64FAULT *fault,
			  struct page_info** page)
{
	struct vcpu *v = current;
	unsigned long mpaddr = domain_addr;
	unsigned long virt;
	*fault = IA64_NO_FAULT;

 again:
	/* Guest running EFI in virtual mode: translate through its TLB
	   first to get the metaphysical address. */
	if (v->domain->arch.sal_data->efi_virt_mode) {
		*fault = vcpu_tpa(v, domain_addr, &mpaddr);
		if (*fault != IA64_NO_FAULT) return 0;
	}

	virt = (unsigned long)domain_mpa_to_imva(v->domain, mpaddr);
	*page = virt_to_page(virt);
	if (get_page(*page, current->domain) == 0) {
		if (page_get_owner(*page) != current->domain) {
			// which code is appropriate?
			*fault = IA64_FAULT;
			return 0;
		}
		/* get_page() raced with a transient state but the page is
		   still ours: retry the whole translation. */
		goto again;
	}

	return virt;
}
937 static efi_status_t
938 efi_emulate_get_time(
939 unsigned long tv_addr, unsigned long tc_addr,
940 IA64FAULT *fault)
941 {
942 unsigned long tv, tc = 0;
943 struct page_info *tv_page = NULL;
944 struct page_info *tc_page = NULL;
945 efi_status_t status = 0;
946 efi_time_t *tvp;
947 struct tm timeptr;
948 unsigned long xtimesec;
950 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
951 if (*fault != IA64_NO_FAULT)
952 goto errout;
953 if (tc_addr) {
954 tc = efi_translate_domain_addr(tc_addr, fault, &tc_page);
955 if (*fault != IA64_NO_FAULT)
956 goto errout;
957 }
959 spin_lock(&efi_time_services_lock);
960 status = (*efi.get_time)((efi_time_t *) tv, (efi_time_cap_t *) tc);
961 tvp = (efi_time_t *)tv;
962 xtimesec = mktime(tvp->year, tvp->month, tvp->day, tvp->hour,
963 tvp->minute, tvp->second);
964 xtimesec += current->domain->time_offset_seconds;
965 timeptr = gmtime(xtimesec);
966 tvp->second = timeptr.tm_sec;
967 tvp->minute = timeptr.tm_min;
968 tvp->hour = timeptr.tm_hour;
969 tvp->day = timeptr.tm_mday;
970 tvp->month = timeptr.tm_mon + 1;
971 tvp->year = timeptr.tm_year + 1900;
972 spin_unlock(&efi_time_services_lock);
974 errout:
975 if (tc_page != NULL)
976 put_page(tc_page);
977 if (tv_page != NULL)
978 put_page(tv_page);
980 return status;
981 }
983 static efi_status_t
984 efi_emulate_set_time(
985 unsigned long tv_addr, IA64FAULT *fault)
986 {
987 unsigned long tv;
988 struct page_info *tv_page = NULL;
989 efi_status_t status = 0;
991 if (current->domain != dom0)
992 return EFI_UNSUPPORTED;
994 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
995 if (*fault != IA64_NO_FAULT)
996 goto errout;
998 spin_lock(&efi_time_services_lock);
999 status = (*efi.set_time)((efi_time_t *)tv);
1000 spin_unlock(&efi_time_services_lock);
1002 errout:
1003 if (tv_page != NULL)
1004 put_page(tv_page);
1006 return status;
1009 static efi_status_t
1010 efi_emulate_get_wakeup_time(
1011 unsigned long e_addr, unsigned long p_addr,
1012 unsigned long tv_addr, IA64FAULT *fault)
1014 unsigned long enabled, pending, tv;
1015 struct page_info *e_page = NULL, *p_page = NULL,
1016 *tv_page = NULL;
1017 efi_status_t status = 0;
1019 if (current->domain != dom0)
1020 return EFI_UNSUPPORTED;
1022 if (!e_addr || !p_addr || !tv_addr)
1023 return EFI_INVALID_PARAMETER;
1025 enabled = efi_translate_domain_addr(e_addr, fault, &e_page);
1026 if (*fault != IA64_NO_FAULT)
1027 goto errout;
1028 pending = efi_translate_domain_addr(p_addr, fault, &p_page);
1029 if (*fault != IA64_NO_FAULT)
1030 goto errout;
1031 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
1032 if (*fault != IA64_NO_FAULT)
1033 goto errout;
1035 spin_lock(&efi_time_services_lock);
1036 status = (*efi.get_wakeup_time)((efi_bool_t *)enabled,
1037 (efi_bool_t *)pending,
1038 (efi_time_t *)tv);
1039 spin_unlock(&efi_time_services_lock);
1041 errout:
1042 if (e_page != NULL)
1043 put_page(e_page);
1044 if (p_page != NULL)
1045 put_page(p_page);
1046 if (tv_page != NULL)
1047 put_page(tv_page);
1049 return status;
1052 static efi_status_t
1053 efi_emulate_set_wakeup_time(
1054 unsigned long enabled, unsigned long tv_addr,
1055 IA64FAULT *fault)
1057 unsigned long tv = 0;
1058 struct page_info *tv_page = NULL;
1059 efi_status_t status = 0;
1061 if (current->domain != dom0)
1062 return EFI_UNSUPPORTED;
1064 if (tv_addr) {
1065 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
1066 if (*fault != IA64_NO_FAULT)
1067 goto errout;
1070 spin_lock(&efi_time_services_lock);
1071 status = (*efi.set_wakeup_time)((efi_bool_t)enabled,
1072 (efi_time_t *)tv);
1073 spin_unlock(&efi_time_services_lock);
1075 errout:
1076 if (tv_page != NULL)
1077 put_page(tv_page);
1079 return status;
1082 static efi_status_t
1083 efi_emulate_get_variable(
1084 unsigned long name_addr, unsigned long vendor_addr,
1085 unsigned long attr_addr, unsigned long data_size_addr,
1086 unsigned long data_addr, IA64FAULT *fault)
1088 unsigned long name, vendor, attr = 0, data_size, data;
1089 struct page_info *name_page = NULL, *vendor_page = NULL,
1090 *attr_page = NULL, *data_size_page = NULL,
1091 *data_page = NULL;
1092 efi_status_t status = 0;
1094 if (current->domain != dom0)
1095 return EFI_UNSUPPORTED;
1097 name = efi_translate_domain_addr(name_addr, fault, &name_page);
1098 if (*fault != IA64_NO_FAULT)
1099 goto errout;
1100 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
1101 if (*fault != IA64_NO_FAULT)
1102 goto errout;
1103 data_size = efi_translate_domain_addr(data_size_addr, fault,
1104 &data_size_page);
1105 if (*fault != IA64_NO_FAULT)
1106 goto errout;
1107 data = efi_translate_domain_addr(data_addr, fault, &data_page);
1108 if (*fault != IA64_NO_FAULT)
1109 goto errout;
1110 if (attr_addr) {
1111 attr = efi_translate_domain_addr(attr_addr, fault, &attr_page);
1112 if (*fault != IA64_NO_FAULT)
1113 goto errout;
1116 status = (*efi.get_variable)((efi_char16_t *)name,
1117 (efi_guid_t *)vendor,
1118 (u32 *)attr,
1119 (unsigned long *)data_size,
1120 (void *)data);
1122 errout:
1123 if (name_page != NULL)
1124 put_page(name_page);
1125 if (vendor_page != NULL)
1126 put_page(vendor_page);
1127 if (attr_page != NULL)
1128 put_page(attr_page);
1129 if (data_size_page != NULL)
1130 put_page(data_size_page);
1131 if (data_page != NULL)
1132 put_page(data_page);
1134 return status;
1137 static efi_status_t
1138 efi_emulate_get_next_variable(
1139 unsigned long name_size_addr, unsigned long name_addr,
1140 unsigned long vendor_addr, IA64FAULT *fault)
1142 unsigned long name_size, name, vendor;
1143 struct page_info *name_size_page = NULL, *name_page = NULL,
1144 *vendor_page = NULL;
1145 efi_status_t status = 0;
1147 if (current->domain != dom0)
1148 return EFI_UNSUPPORTED;
1150 name_size = efi_translate_domain_addr(name_size_addr, fault,
1151 &name_size_page);
1152 if (*fault != IA64_NO_FAULT)
1153 goto errout;
1154 name = efi_translate_domain_addr(name_addr, fault, &name_page);
1155 if (*fault != IA64_NO_FAULT)
1156 goto errout;
1157 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
1158 if (*fault != IA64_NO_FAULT)
1159 goto errout;
1161 status = (*efi.get_next_variable)((unsigned long *)name_size,
1162 (efi_char16_t *)name,
1163 (efi_guid_t *)vendor);
1165 errout:
1166 if (name_size_page != NULL)
1167 put_page(name_size_page);
1168 if (name_page != NULL)
1169 put_page(name_page);
1170 if (vendor_page != NULL)
1171 put_page(vendor_page);
1173 return status;
1176 static efi_status_t
1177 efi_emulate_set_variable(
1178 unsigned long name_addr, unsigned long vendor_addr,
1179 unsigned long attr, unsigned long data_size,
1180 unsigned long data_addr, IA64FAULT *fault)
1182 unsigned long name, vendor, data;
1183 struct page_info *name_page = NULL, *vendor_page = NULL,
1184 *data_page = NULL;
1185 efi_status_t status = 0;
1187 if (current->domain != dom0)
1188 return EFI_UNSUPPORTED;
1190 name = efi_translate_domain_addr(name_addr, fault, &name_page);
1191 if (*fault != IA64_NO_FAULT)
1192 goto errout;
1193 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
1194 if (*fault != IA64_NO_FAULT)
1195 goto errout;
1196 data = efi_translate_domain_addr(data_addr, fault, &data_page);
1197 if (*fault != IA64_NO_FAULT)
1198 goto errout;
1200 status = (*efi.set_variable)((efi_char16_t *)name,
1201 (efi_guid_t *)vendor,
1202 attr,
1203 data_size,
1204 (void *)data);
1206 errout:
1207 if (name_page != NULL)
1208 put_page(name_page);
1209 if (vendor_page != NULL)
1210 put_page(vendor_page);
1211 if (data_page != NULL)
1212 put_page(data_page);
1214 return status;
1217 static efi_status_t
1218 efi_emulate_set_virtual_address_map(
1219 unsigned long memory_map_size, unsigned long descriptor_size,
1220 u32 descriptor_version, efi_memory_desc_t *virtual_map)
1222 void *efi_map_start, *efi_map_end, *p;
1223 efi_memory_desc_t entry, *md = &entry;
1224 u64 efi_desc_size;
1226 unsigned long *vfn;
1227 struct domain *d = current->domain;
1228 efi_runtime_services_t *efi_runtime = d->arch.efi_runtime;
1229 fpswa_interface_t *fpswa_inf = d->arch.fpswa_inf;
1231 if (descriptor_version != EFI_MEMDESC_VERSION) {
1232 printk ("efi_emulate_set_virtual_address_map: memory "
1233 "descriptor version unmatched (%d vs %d)\n",
1234 (int)descriptor_version, EFI_MEMDESC_VERSION);
1235 return EFI_INVALID_PARAMETER;
1238 if (descriptor_size != sizeof(efi_memory_desc_t)) {
1239 printk ("efi_emulate_set_virtual_address_map: memory descriptor size unmatched\n");
1240 return EFI_INVALID_PARAMETER;
1243 if (d->arch.sal_data->efi_virt_mode)
1244 return EFI_UNSUPPORTED;
1246 efi_map_start = virtual_map;
1247 efi_map_end = efi_map_start + memory_map_size;
1248 efi_desc_size = sizeof(efi_memory_desc_t);
1250 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
1251 if (copy_from_user(&entry, p, sizeof(efi_memory_desc_t))) {
1252 printk ("efi_emulate_set_virtual_address_map: copy_from_user() fault. addr=0x%p\n", p);
1253 return EFI_UNSUPPORTED;
1256 /* skip over non-PAL_CODE memory descriptors; EFI_RUNTIME is included in PAL_CODE. */
1257 if (md->type != EFI_PAL_CODE)
1258 continue;
1260 #define EFI_HYPERCALL_PATCH_TO_VIRT(tgt,call) \
1261 do { \
1262 vfn = (unsigned long *) domain_mpa_to_imva(d, tgt); \
1263 *vfn++ = FW_HYPERCALL_##call##_INDEX * 16UL + md->virt_addr; \
1264 *vfn++ = 0; \
1265 } while (0)
1267 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_time,EFI_GET_TIME);
1268 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_time,EFI_SET_TIME);
1269 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_wakeup_time,EFI_GET_WAKEUP_TIME);
1270 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_wakeup_time,EFI_SET_WAKEUP_TIME);
1271 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_virtual_address_map,EFI_SET_VIRTUAL_ADDRESS_MAP);
1272 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_variable,EFI_GET_VARIABLE);
1273 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_variable,EFI_GET_NEXT_VARIABLE);
1274 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_variable,EFI_SET_VARIABLE);
1275 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_high_mono_count,EFI_GET_NEXT_HIGH_MONO_COUNT);
1276 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->reset_system,EFI_RESET_SYSTEM);
1278 vfn = (unsigned long *) domain_mpa_to_imva(d, (unsigned long) fpswa_inf->fpswa);
1279 *vfn++ = FW_HYPERCALL_FPSWA_PATCH_INDEX * 16UL + md->virt_addr;
1280 *vfn = 0;
1281 fpswa_inf->fpswa = (void *) (FW_HYPERCALL_FPSWA_ENTRY_INDEX * 16UL + md->virt_addr);
1282 break;
1285 /* The virtual address map has been applied. */
1286 d->arch.sal_data->efi_virt_mode = 1;
1288 return EFI_SUCCESS;
1291 efi_status_t
1292 efi_emulator (struct pt_regs *regs, IA64FAULT *fault)
1294 struct vcpu *v = current;
1295 efi_status_t status;
1297 debugger_event(XEN_IA64_DEBUG_ON_EFI);
1299 *fault = IA64_NO_FAULT;
1301 switch (regs->r2) {
1302 case FW_HYPERCALL_EFI_RESET_SYSTEM:
1304 u8 reason;
1305 unsigned long val = vcpu_get_gr(v,32);
1306 switch (val)
1308 case EFI_RESET_SHUTDOWN:
1309 reason = SHUTDOWN_poweroff;
1310 break;
1311 case EFI_RESET_COLD:
1312 case EFI_RESET_WARM:
1313 default:
1314 reason = SHUTDOWN_reboot;
1315 break;
1317 domain_shutdown (current->domain, reason);
1319 status = EFI_UNSUPPORTED;
1320 break;
1321 case FW_HYPERCALL_EFI_GET_TIME:
1322 status = efi_emulate_get_time (
1323 vcpu_get_gr(v,32),
1324 vcpu_get_gr(v,33),
1325 fault);
1326 break;
1327 case FW_HYPERCALL_EFI_SET_TIME:
1328 status = efi_emulate_set_time (
1329 vcpu_get_gr(v,32),
1330 fault);
1331 break;
1332 case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
1333 status = efi_emulate_get_wakeup_time (
1334 vcpu_get_gr(v,32),
1335 vcpu_get_gr(v,33),
1336 vcpu_get_gr(v,34),
1337 fault);
1338 break;
1339 case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
1340 status = efi_emulate_set_wakeup_time (
1341 vcpu_get_gr(v,32),
1342 vcpu_get_gr(v,33),
1343 fault);
1344 break;
1345 case FW_HYPERCALL_EFI_GET_VARIABLE:
1346 status = efi_emulate_get_variable (
1347 vcpu_get_gr(v,32),
1348 vcpu_get_gr(v,33),
1349 vcpu_get_gr(v,34),
1350 vcpu_get_gr(v,35),
1351 vcpu_get_gr(v,36),
1352 fault);
1353 break;
1354 case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
1355 status = efi_emulate_get_next_variable (
1356 vcpu_get_gr(v,32),
1357 vcpu_get_gr(v,33),
1358 vcpu_get_gr(v,34),
1359 fault);
1360 break;
1361 case FW_HYPERCALL_EFI_SET_VARIABLE:
1362 status = efi_emulate_set_variable (
1363 vcpu_get_gr(v,32),
1364 vcpu_get_gr(v,33),
1365 vcpu_get_gr(v,34),
1366 vcpu_get_gr(v,35),
1367 vcpu_get_gr(v,36),
1368 fault);
1369 break;
1370 case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
1371 status = efi_emulate_set_virtual_address_map (
1372 vcpu_get_gr(v,32),
1373 vcpu_get_gr(v,33),
1374 (u32) vcpu_get_gr(v,34),
1375 (efi_memory_desc_t *) vcpu_get_gr(v,35));
1376 break;
1377 case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
1378 // FIXME: need fixes in efi.h from 2.6.9
1379 status = EFI_UNSUPPORTED;
1380 break;
1381 default:
1382 printk("unknown ia64 fw hypercall %lx\n", regs->r2);
1383 status = EFI_UNSUPPORTED;
1386 return status;
1389 void
1390 do_ssc(unsigned long ssc, struct pt_regs *regs)
1392 unsigned long arg0, arg1, arg2, arg3, retval;
1393 char buf[2];
1394 /**/ static int last_fd, last_count; // FIXME FIXME FIXME
1395 /**/ // BROKEN FOR MULTIPLE DOMAINS & SMP
1396 /**/ struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
1398 arg0 = vcpu_get_gr(current,32);
1399 switch(ssc) {
1400 case SSC_PUTCHAR:
1401 buf[0] = arg0;
1402 buf[1] = '\0';
1403 printk(buf);
1404 break;
1405 case SSC_GETCHAR:
1406 retval = ia64_ssc(0,0,0,0,ssc);
1407 vcpu_set_gr(current,8,retval,0);
1408 break;
1409 case SSC_WAIT_COMPLETION:
1410 if (arg0) { // metaphysical address
1412 arg0 = translate_domain_mpaddr(arg0, NULL);
1413 /**/ stat = (struct ssc_disk_stat *)__va(arg0);
1414 ///**/ if (stat->fd == last_fd) stat->count = last_count;
1415 /**/ stat->count = last_count;
1416 //if (last_count >= PAGE_SIZE) printk("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
1417 ///**/ retval = ia64_ssc(arg0,0,0,0,ssc);
1418 /**/ retval = 0;
1420 else retval = -1L;
1421 vcpu_set_gr(current,8,retval,0);
1422 break;
1423 case SSC_OPEN:
1424 arg1 = vcpu_get_gr(current,33); // access rights
1425 if (!running_on_sim) {
1426 printk("SSC_OPEN, not implemented on hardware. (ignoring...)\n");
1427 arg0 = 0;
1429 if (arg0) { // metaphysical address
1430 arg0 = translate_domain_mpaddr(arg0, NULL);
1431 retval = ia64_ssc(arg0,arg1,0,0,ssc);
1433 else retval = -1L;
1434 vcpu_set_gr(current,8,retval,0);
1435 break;
1436 case SSC_WRITE:
1437 case SSC_READ:
1438 //if (ssc == SSC_WRITE) printk("DOING AN SSC_WRITE\n");
1439 arg1 = vcpu_get_gr(current,33);
1440 arg2 = vcpu_get_gr(current,34);
1441 arg3 = vcpu_get_gr(current,35);
1442 if (arg2) { // metaphysical address of descriptor
1443 struct ssc_disk_req *req;
1444 unsigned long mpaddr;
1445 long len;
1447 arg2 = translate_domain_mpaddr(arg2, NULL);
1448 req = (struct ssc_disk_req *) __va(arg2);
1449 req->len &= 0xffffffffL; // avoid strange bug
1450 len = req->len;
1451 /**/ last_fd = arg1;
1452 /**/ last_count = len;
1453 mpaddr = req->addr;
1454 //if (last_count >= PAGE_SIZE) printk("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
1455 retval = 0;
1456 if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
1457 // do partial page first
1458 req->addr = translate_domain_mpaddr(mpaddr, NULL);
1459 req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
1460 len -= req->len; mpaddr += req->len;
1461 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1462 arg3 += req->len; // file offset
1463 /**/ last_stat.fd = last_fd;
1464 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
1465 //if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
1467 if (retval >= 0) while (len > 0) {
1468 req->addr = translate_domain_mpaddr(mpaddr, NULL);
1469 req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
1470 len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
1471 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1472 arg3 += req->len; // file offset
1473 // TEMP REMOVED AGAIN arg3 += req->len; // file offset
1474 /**/ last_stat.fd = last_fd;
1475 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
1476 //if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
1478 // set it back to the original value
1479 req->len = last_count;
1481 else retval = -1L;
1482 vcpu_set_gr(current,8,retval,0);
1483 //if (last_count >= PAGE_SIZE) printk("retval=%x\n",retval);
1484 break;
1485 case SSC_CONNECT_INTERRUPT:
1486 arg1 = vcpu_get_gr(current,33);
1487 arg2 = vcpu_get_gr(current,34);
1488 arg3 = vcpu_get_gr(current,35);
1489 if (!running_on_sim) {
1490 printk("SSC_CONNECT_INTERRUPT, not implemented on hardware. (ignoring...)\n");
1491 break;
1493 (void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1494 break;
1495 case SSC_NETDEV_PROBE:
1496 vcpu_set_gr(current,8,-1L,0);
1497 break;
1498 default:
1499 panic_domain(regs,
1500 "%s: bad ssc code %lx, iip=0x%lx, b0=0x%lx\n",
1501 __func__, ssc, regs->cr_iip, regs->b0);
1502 break;
1504 vcpu_increment_iip(current);