ia64/xen-unstable

view xen/arch/ia64/xen/fw_emul.c @ 15423:cbf749e9961f

[IA64] Cleanup: Move is_platform_hp_ski() from xenmisc.c to xensetup.c

- only caller is start_kernel
- change to static __init
- also move running_on_sim to xensetup.c, and change it from unsigned
long to int, since it's just a boolean
- declare running_on_sim in config.h near some other externs

Tested by building, booting, starting a PV guest on rx2620.

Signed-off-by: Aron Griffis <aron@hp.com>
author Alex Williamson <alex.williamson@hp.com>
date Mon Jul 02 10:25:29 2007 -0600 (2007-07-02)
parents 6b1b119191f1
children 522a1932111f
line source
1 /*
2 * fw_emul.c:
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 */
18 #include <xen/config.h>
19 #include <asm/system.h>
20 #include <asm/pgalloc.h>
22 #include <linux/efi.h>
23 #include <asm/pal.h>
24 #include <asm/sal.h>
25 #include <asm/sn/sn_sal.h>
26 #include <asm/sn/hubdev.h>
27 #include <asm/xenmca.h>
29 #include <public/sched.h>
30 #include "hpsim_ssc.h"
31 #include <asm/vcpu.h>
32 #include <asm/vmx_vcpu.h>
33 #include <asm/dom_fw.h>
34 #include <asm/uaccess.h>
35 #include <xen/console.h>
36 #include <xen/hypercall.h>
37 #include <xen/softirq.h>
38 #include <xen/time.h>
40 static DEFINE_SPINLOCK(efi_time_services_lock);
/*
 * Machine-check parameters registered by the guest via SAL_MC_SET_PARAMS.
 * One slot per parameter type, indexed by param_type (up to
 * SAL_MC_PARAM_CPE_INT).  Fields mirror the SAL_MC_SET_PARAMS arguments
 * in1..in5 verbatim; their semantics follow the IA-64 SAL specification
 * (notification mechanism, vector/address, timeout, rendezvous policy).
 */
struct sal_mc_params {
	u64 param_type;   /* SAL_MC_PARAM_* selector; also the array index */
	u64 i_or_m;       /* in2: notification mechanism (per SAL spec) */
	u64 i_or_m_val;   /* in3: mechanism-specific value (per SAL spec) */
	u64 timeout;      /* in4 */
	u64 rz_always;    /* in5 */
} sal_mc_params[SAL_MC_PARAM_CPE_INT + 1];
/*
 * OS entry points registered via SAL_SET_VECTORS (other than
 * SAL_VECTOR_OS_BOOT_RENDEZ, which is kept per-domain instead).
 * Indexed by vector type; fields mirror the SAL_SET_VECTORS arguments
 * in1..in7 verbatim.
 */
struct sal_vectors {
	u64 vector_type;    /* in1: SAL_VECTOR_* selector; also the index */
	u64 handler_addr1;  /* in2: first handler entry point */
	u64 gp1;            /* in3: gp value for the first handler */
	u64 handler_len1;   /* in4 */
	u64 handler_addr2;  /* in5: second handler entry point */
	u64 gp2;            /* in6 */
	u64 handler_len2;   /* in7 */
} sal_vectors[SAL_VECTOR_OS_BOOT_RENDEZ + 1];
/*
 * Argument block passed (possibly cross-CPU via smp_call_function_single)
 * to get_state_info_on() / clear_state_info_on().
 */
struct smp_call_args_t {
	u64 type;              /* SAL_INFO_TYPE_* record type to fetch/clear */
	u64 ret;               /* out: SAL return value (record size or 0) */
	u64 target;            /* guest paddr of the destination buffer */
	struct domain *domain; /* domain on whose behalf the copy is done */
	int corrected;         /* downgrade record severity to "corrected" */
	int status;            /* out: error code if the copy-out failed */
	void *data;            /* unused by the visible callers */
};
70 extern sal_log_record_header_t *sal_record;
71 DEFINE_SPINLOCK(sal_record_lock);
73 extern spinlock_t sal_queue_lock;
75 #define IA64_SAL_NO_INFORMATION_AVAILABLE -5
77 #if defined(IA64_SAL_DEBUG_INFO)
78 static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
80 # define IA64_SAL_DEBUG(fmt...) printk("sal_emulator: " fmt)
81 #else
82 # define IA64_SAL_DEBUG(fmt...)
83 #endif
85 void get_state_info_on(void *data) {
86 struct smp_call_args_t *arg = data;
87 int flags;
89 spin_lock_irqsave(&sal_record_lock, flags);
90 memset(sal_record, 0, ia64_sal_get_state_info_size(arg->type));
91 arg->ret = ia64_sal_get_state_info(arg->type, (u64 *)sal_record);
92 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) on CPU#%d returns %ld.\n",
93 rec_name[arg->type], smp_processor_id(), arg->ret);
94 if (arg->corrected) {
95 sal_record->severity = sal_log_severity_corrected;
96 IA64_SAL_DEBUG("%s: IA64_SAL_CLEAR_STATE_INFO(SAL_INFO_TYPE_MCA)"
97 " force\n", __FUNCTION__);
98 }
99 if (arg->ret > 0) {
100 /*
101 * Save current->domain and set to local(caller) domain for
102 * xencomm_paddr_to_maddr() which calculates maddr from
103 * paddr using mpa value of current->domain.
104 */
105 struct domain *save;
106 save = current->domain;
107 current->domain = arg->domain;
108 if (xencomm_copy_to_guest((void*)arg->target,
109 sal_record, arg->ret, 0)) {
110 printk("SAL_GET_STATE_INFO can't copy to user!!!!\n");
111 arg->status = IA64_SAL_NO_INFORMATION_AVAILABLE;
112 arg->ret = 0;
113 }
114 /* Restore current->domain to saved value. */
115 current->domain = save;
116 }
117 spin_unlock_irqrestore(&sal_record_lock, flags);
118 }
120 void clear_state_info_on(void *data) {
121 struct smp_call_args_t *arg = data;
123 arg->ret = ia64_sal_clear_state_info(arg->type);
124 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) on CPU#%d returns %ld.\n",
125 rec_name[arg->type], smp_processor_id(), arg->ret);
127 }
129 struct sal_ret_values
130 sal_emulator (long index, unsigned long in1, unsigned long in2,
131 unsigned long in3, unsigned long in4, unsigned long in5,
132 unsigned long in6, unsigned long in7)
133 {
134 struct ia64_sal_retval ret_stuff;
135 unsigned long r9 = 0;
136 unsigned long r10 = 0;
137 long r11 = 0;
138 long status;
140 status = 0;
141 switch (index) {
142 case SAL_FREQ_BASE:
143 if (likely(!running_on_sim))
144 status = ia64_sal_freq_base(in1,&r9,&r10);
145 else switch (in1) {
146 case SAL_FREQ_BASE_PLATFORM:
147 r9 = 200000000;
148 break;
150 case SAL_FREQ_BASE_INTERVAL_TIMER:
151 r9 = 700000000;
152 break;
154 case SAL_FREQ_BASE_REALTIME_CLOCK:
155 r9 = 1;
156 break;
158 default:
159 status = -1;
160 break;
161 }
162 break;
163 case SAL_PCI_CONFIG_READ:
164 if (current->domain == dom0) {
165 u64 value;
166 // note that args 2&3 are swapped!!
167 status = ia64_sal_pci_config_read(in1,in3,in2,&value);
168 r9 = value;
169 }
170 else
171 printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_READ\n");
172 break;
173 case SAL_PCI_CONFIG_WRITE:
174 if (current->domain == dom0) {
175 if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
176 (in4 > 1) ||
177 (in2 > 8) || (in2 & (in2-1)))
178 printk("*** SAL_PCI_CONF_WRITE?!?(adr=0x%lx,typ=0x%lx,sz=0x%lx,val=0x%lx)\n",
179 in1,in4,in2,in3);
180 // note that args are in a different order!!
181 status = ia64_sal_pci_config_write(in1,in4,in2,in3);
182 }
183 else
184 printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_WRITE\n");
185 break;
186 case SAL_SET_VECTORS:
187 if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
188 if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
189 /* Sanity check: cs_length1 must be 0,
190 second vector is reserved. */
191 status = -2;
192 }
193 else {
194 struct domain *d = current->domain;
195 d->arch.sal_data->boot_rdv_ip = in2;
196 d->arch.sal_data->boot_rdv_r1 = in3;
197 }
198 }
199 else
200 {
201 if (in1 > sizeof(sal_vectors)/sizeof(sal_vectors[0])-1)
202 BUG();
203 sal_vectors[in1].vector_type = in1;
204 sal_vectors[in1].handler_addr1 = in2;
205 sal_vectors[in1].gp1 = in3;
206 sal_vectors[in1].handler_len1 = in4;
207 sal_vectors[in1].handler_addr2 = in5;
208 sal_vectors[in1].gp2 = in6;
209 sal_vectors[in1].handler_len2 = in7;
210 }
211 break;
212 case SAL_GET_STATE_INFO:
213 if (current->domain == dom0) {
214 sal_queue_entry_t *e;
215 unsigned long flags;
216 struct smp_call_args_t arg;
218 spin_lock_irqsave(&sal_queue_lock, flags);
219 if (!sal_queue || list_empty(&sal_queue[in1])) {
220 sal_log_record_header_t header;
221 XEN_GUEST_HANDLE(void) handle =
222 *(XEN_GUEST_HANDLE(void)*)&in3;
224 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) "
225 "no sal_queue entry found.\n",
226 rec_name[in1]);
227 memset(&header, 0, sizeof(header));
229 if (copy_to_guest(handle, &header, 1)) {
230 printk("sal_emulator: "
231 "SAL_GET_STATE_INFO can't copy "
232 "empty header to user: 0x%lx\n",
233 in3);
234 }
235 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
236 r9 = 0;
237 spin_unlock_irqrestore(&sal_queue_lock, flags);
238 break;
239 }
240 e = list_entry(sal_queue[in1].next,
241 sal_queue_entry_t, list);
242 spin_unlock_irqrestore(&sal_queue_lock, flags);
244 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s <= %s) "
245 "on CPU#%d.\n",
246 rec_name[e->sal_info_type],
247 rec_name[in1], e->cpuid);
249 arg.type = e->sal_info_type;
250 arg.target = in3;
251 arg.corrected = !!((in1 != e->sal_info_type) &&
252 (e->sal_info_type == SAL_INFO_TYPE_MCA));
253 arg.domain = current->domain;
254 arg.status = 0;
256 if (e->cpuid == smp_processor_id()) {
257 IA64_SAL_DEBUG("SAL_GET_STATE_INFO: local\n");
258 get_state_info_on(&arg);
259 } else {
260 int ret;
261 IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
262 ret = smp_call_function_single(e->cpuid,
263 get_state_info_on,
264 &arg, 0, 1);
265 if (ret < 0) {
266 printk("SAL_GET_STATE_INFO "
267 "smp_call_function_single error:"
268 " %d\n", ret);
269 arg.ret = 0;
270 arg.status =
271 IA64_SAL_NO_INFORMATION_AVAILABLE;
272 }
273 }
274 r9 = arg.ret;
275 status = arg.status;
276 if (r9 == 0) {
277 spin_lock_irqsave(&sal_queue_lock, flags);
278 list_del(&e->list);
279 spin_unlock_irqrestore(&sal_queue_lock, flags);
280 xfree(e);
281 }
282 } else {
283 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
284 r9 = 0;
285 }
286 break;
287 case SAL_GET_STATE_INFO_SIZE:
288 r9 = ia64_sal_get_state_info_size(in1);
289 break;
290 case SAL_CLEAR_STATE_INFO:
291 if (current->domain == dom0) {
292 sal_queue_entry_t *e;
293 unsigned long flags;
294 struct smp_call_args_t arg;
296 spin_lock_irqsave(&sal_queue_lock, flags);
297 if (list_empty(&sal_queue[in1])) {
298 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) "
299 "no sal_queue entry found.\n",
300 rec_name[in1]);
301 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
302 r9 = 0;
303 spin_unlock_irqrestore(&sal_queue_lock, flags);
304 break;
305 }
306 e = list_entry(sal_queue[in1].next,
307 sal_queue_entry_t, list);
309 list_del(&e->list);
310 spin_unlock_irqrestore(&sal_queue_lock, flags);
312 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s <= %s) "
313 "on CPU#%d.\n",
314 rec_name[e->sal_info_type],
315 rec_name[in1], e->cpuid);
318 arg.type = e->sal_info_type;
319 arg.status = 0;
320 if (e->cpuid == smp_processor_id()) {
321 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: local\n");
322 clear_state_info_on(&arg);
323 } else {
324 int ret;
325 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: remote\n");
326 ret = smp_call_function_single(e->cpuid,
327 clear_state_info_on, &arg, 0, 1);
328 if (ret < 0) {
329 printk("sal_emulator: "
330 "SAL_CLEAR_STATE_INFO "
331 "smp_call_function_single error:"
332 " %d\n", ret);
333 arg.ret = 0;
334 arg.status =
335 IA64_SAL_NO_INFORMATION_AVAILABLE;
336 }
337 }
338 r9 = arg.ret;
339 status = arg.status;
340 xfree(e);
341 }
342 break;
343 case SAL_MC_RENDEZ:
344 printk("*** CALLED SAL_MC_RENDEZ. IGNORED...\n");
345 break;
346 case SAL_MC_SET_PARAMS:
347 if (in1 > sizeof(sal_mc_params)/sizeof(sal_mc_params[0]))
348 BUG();
349 sal_mc_params[in1].param_type = in1;
350 sal_mc_params[in1].i_or_m = in2;
351 sal_mc_params[in1].i_or_m_val = in3;
352 sal_mc_params[in1].timeout = in4;
353 sal_mc_params[in1].rz_always = in5;
354 break;
355 case SAL_CACHE_FLUSH:
356 if (1) {
357 /* Flush using SAL.
358 This method is faster but has a side effect on
359 other vcpu running on this cpu. */
360 status = ia64_sal_cache_flush (in1);
361 }
362 else {
363 /* Flush with fc all the domain.
364 This method is slower but has no side effects. */
365 domain_cache_flush (current->domain, in1 == 4 ? 1 : 0);
366 status = 0;
367 }
368 break;
369 case SAL_CACHE_INIT:
370 printk("*** CALLED SAL_CACHE_INIT. IGNORED...\n");
371 break;
372 case SAL_UPDATE_PAL:
373 printk("*** CALLED SAL_UPDATE_PAL. IGNORED...\n");
374 break;
375 case SAL_XEN_SAL_RETURN:
376 if (!test_and_set_bit(_VPF_down, &current->pause_flags))
377 vcpu_sleep_nosync(current);
378 break;
379 case SN_SAL_GET_MASTER_NASID:
380 status = -1;
381 if (current->domain == dom0) {
382 /* printk("*** Emulating SN_SAL_GET_MASTER_NASID ***\n"); */
383 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_MASTER_NASID,
384 0, 0, 0, 0, 0, 0, 0);
385 status = ret_stuff.status;
386 r9 = ret_stuff.v0;
387 r10 = ret_stuff.v1;
388 r11 = ret_stuff.v2;
389 }
390 break;
391 case SN_SAL_GET_KLCONFIG_ADDR:
392 status = -1;
393 if (current->domain == dom0) {
394 /* printk("*** Emulating SN_SAL_GET_KLCONFIG_ADDR ***\n"); */
395 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR,
396 in1, 0, 0, 0, 0, 0, 0);
397 status = ret_stuff.status;
398 r9 = ret_stuff.v0;
399 r10 = ret_stuff.v1;
400 r11 = ret_stuff.v2;
401 }
402 break;
403 case SN_SAL_GET_SAPIC_INFO:
404 status = -1;
405 if (current->domain == dom0) {
406 /* printk("*** Emulating SN_SAL_GET_SAPIC_INFO ***\n"); */
407 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO,
408 in1, 0, 0, 0, 0, 0, 0);
409 status = ret_stuff.status;
410 r9 = ret_stuff.v0;
411 r10 = ret_stuff.v1;
412 r11 = ret_stuff.v2;
413 }
414 break;
415 case SN_SAL_GET_SN_INFO:
416 status = -1;
417 if (current->domain == dom0) {
418 /* printk("*** Emulating SN_SAL_GET_SN_INFO ***\n"); */
419 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO,
420 in1, 0, 0, 0, 0, 0, 0);
421 status = ret_stuff.status;
422 r9 = ret_stuff.v0;
423 r10 = ret_stuff.v1;
424 r11 = ret_stuff.v2;
425 }
426 break;
427 case SN_SAL_IOIF_GET_HUBDEV_INFO:
428 status = -1;
429 if (current->domain == dom0) {
430 /* printk("*** Emulating SN_SAL_IOIF_GET_HUBDEV_INFO ***\n"); */
431 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_IOIF_GET_HUBDEV_INFO,
432 in1, in2, 0, 0, 0, 0, 0);
433 status = ret_stuff.status;
434 r9 = ret_stuff.v0;
435 r10 = ret_stuff.v1;
436 r11 = ret_stuff.v2;
437 }
438 break;
439 case SN_SAL_IOIF_INIT:
440 status = -1;
441 if (current->domain == dom0) {
442 /* printk("*** Emulating SN_SAL_IOIF_INIT ***\n"); */
443 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_IOIF_INIT,
444 0, 0, 0, 0, 0, 0, 0);
445 status = ret_stuff.status;
446 r9 = ret_stuff.v0;
447 r10 = ret_stuff.v1;
448 r11 = ret_stuff.v2;
449 }
450 break;
451 case SN_SAL_GET_PROM_FEATURE_SET:
452 status = -1;
453 if (current->domain == dom0) {
454 /* printk("*** Emulating SN_SAL_GET_PROM_FEATURE_SET ***\n"); */
455 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_PROM_FEATURE_SET,
456 in1, 0, 0, 0, 0, 0, 0);
457 status = ret_stuff.status;
458 r9 = ret_stuff.v0;
459 r10 = ret_stuff.v1;
460 r11 = ret_stuff.v2;
461 }
462 break;
463 case SN_SAL_SET_OS_FEATURE_SET:
464 status = -1;
465 if (current->domain == dom0) {
466 /* printk("*** Emulating SN_SAL_SET_OS_FEATURE_SET ***\n"); */
467 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SET_OS_FEATURE_SET,
468 in1, 0, 0, 0, 0, 0, 0);
469 status = ret_stuff.status;
470 r9 = ret_stuff.v0;
471 r10 = ret_stuff.v1;
472 r11 = ret_stuff.v2;
473 }
474 break;
475 case SN_SAL_SET_ERROR_HANDLING_FEATURES:
476 status = -1;
477 if (current->domain == dom0) {
478 /* printk("*** Emulating SN_SAL_SET_ERROR_HANDLING_FEATURES ***\n"); */
479 SAL_CALL_NOLOCK(ret_stuff,
480 SN_SAL_SET_ERROR_HANDLING_FEATURES,
481 in1, 0, 0, 0, 0, 0, 0);
482 status = ret_stuff.status;
483 r9 = ret_stuff.v0;
484 r10 = ret_stuff.v1;
485 r11 = ret_stuff.v2;
486 }
487 break;
488 #if 0
489 /*
490 * Somehow ACPI breaks if allowing this one
491 */
492 case SN_SAL_SET_CPU_NUMBER:
493 status = -1;
494 if (current->domain == dom0) {
495 printk("*** Emulating SN_SAL_SET_CPU_NUMBER ***\n");
496 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SET_CPU_NUMBER,
497 in1, 0, 0, 0, 0, 0, 0);
498 status = ret_stuff.status;
499 r9 = ret_stuff.v0;
500 r10 = ret_stuff.v1;
501 r11 = ret_stuff.v2;
502 }
503 break;
504 #endif
505 case SN_SAL_LOG_CE:
506 status = -1;
507 if (current->domain == dom0) {
508 static int log_ce = 0;
509 if (!log_ce) {
510 printk("*** Emulating SN_SAL_LOG_CE *** "
511 " this will only be printed once\n");
512 log_ce = 1;
513 }
514 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE,
515 0, 0, 0, 0, 0, 0, 0);
516 status = ret_stuff.status;
517 r9 = ret_stuff.v0;
518 r10 = ret_stuff.v1;
519 r11 = ret_stuff.v2;
520 }
521 break;
522 case SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST:
523 status = -1;
524 if (current->domain == dom0) {
525 struct sn_flush_device_common flush;
526 int flush_size;
528 flush_size = sizeof(struct sn_flush_device_common);
529 memset(&flush, 0, flush_size);
530 SAL_CALL_NOLOCK(ret_stuff,
531 SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
532 in1, in2, in3, &flush, 0, 0, 0);
533 #if 0
534 printk("*** Emulating "
535 "SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST ***\n");
536 #endif
537 if (ret_stuff.status == SALRET_OK) {
538 XEN_GUEST_HANDLE(void) handle =
539 *(XEN_GUEST_HANDLE(void)*)&in4;
540 if (copy_to_guest(handle, &flush, 1)) {
541 printk("SN_SAL_IOIF_GET_DEVICE_"
542 "DMAFLUSH_LIST can't copy "
543 "to user!\n");
544 ret_stuff.status = SALRET_ERROR;
545 }
546 }
548 status = ret_stuff.status;
549 r9 = ret_stuff.v0;
550 r10 = ret_stuff.v1;
551 r11 = ret_stuff.v2;
552 }
553 break;
554 default:
555 printk("*** CALLED SAL_ WITH UNKNOWN INDEX (%lx). "
556 "IGNORED...\n", index);
557 status = -1;
558 break;
559 }
560 return ((struct sal_ret_values) {status, r9, r10, r11});
561 }
563 cpumask_t cpu_cache_coherent_map;
/*
 * Argument block shared with remote_pal_cache_flush() when PAL_CACHE_FLUSH
 * is broadcast to all CPUs via smp_call_function().
 */
struct cache_flush_args {
	u64 cache_type;  /* in1 of PAL_CACHE_FLUSH (PAL cache type code) */
	u64 operation;   /* in2: operation code (CHK_INTRS already masked off) */
	u64 progress;    /* initial progress indicator handed to PAL */
	long status;     /* set non-zero by any CPU whose flush failed */
};
572 static void
573 remote_pal_cache_flush(void *v)
574 {
575 struct cache_flush_args *args = v;
576 long status;
577 u64 progress = args->progress;
579 status = ia64_pal_cache_flush(args->cache_type, args->operation,
580 &progress, NULL);
581 if (status != 0)
582 args->status = status;
583 }
/*
 * Emulate a PAL procedure call made by a guest.
 *
 * @index     PAL function number (PAL_*)
 * @in1..in3  raw PAL arguments
 *
 * Returns the PAL calling-convention quadruple {status, r9, r10, r11}.
 * Some calls are forwarded to the real PAL/SAL, some are answered with
 * Xen-private values, and VMX (fully virtualized) domains get slightly
 * different treatment in several cases.
 */
struct ia64_pal_retval
xen_pal_emulator(unsigned long index, u64 in1, u64 in2, u64 in3)
{
	unsigned long r9  = 0;
	unsigned long r10 = 0;
	unsigned long r11 = 0;
	long status = PAL_STATUS_UNIMPLEMENTED;
	unsigned long flags;
	int processor;

	/* On the HP simulator there is no real PAL; use the static stub. */
	if (unlikely(running_on_sim))
		return pal_emulator_static(index);

	// pal code must be mapped by a TR when pal is called, however
	// calls are rare enough that we will map it lazily rather than
	// at every context switch
	//efi_map_pal_code();
	switch (index) {
	    case PAL_MEM_ATTRIB:
		status = ia64_pal_mem_attrib(&r9);
		break;
	    case PAL_FREQ_BASE:
		status = ia64_pal_freq_base(&r9);
		/* Some platforms leave PAL_FREQ_BASE unimplemented; fall
		 * back to the SAL equivalent. */
		if (status == PAL_STATUS_UNIMPLEMENTED) {
			status = ia64_sal_freq_base(0, &r9, &r10);
			r10 = 0;
		}
		break;
	    case PAL_PROC_GET_FEATURES:
		status = ia64_pal_proc_get_features(&r9,&r10,&r11);
		break;
	    case PAL_BUS_GET_FEATURES:
		status = ia64_pal_bus_get_features(
				(pal_bus_features_u_t *) &r9,
				(pal_bus_features_u_t *) &r10,
				(pal_bus_features_u_t *) &r11);
		break;
	    case PAL_FREQ_RATIOS:
		status = ia64_pal_freq_ratios(
				(struct pal_freq_ratio *) &r9,
				(struct pal_freq_ratio *) &r10,
				(struct pal_freq_ratio *) &r11);
		break;
	    case PAL_PTCE_INFO:
		/*
		 * return hard-coded xen-specific values because ptc.e
		 * is emulated on xen to always flush everything
		 * these values result in only one ptc.e instruction
		 */
		status = PAL_STATUS_SUCCESS;
		r10 = (1L << 32) | 1L;
		break;
	    case PAL_VERSION:
		status = ia64_pal_version(
				(pal_version_u_t *) &r9,
				(pal_version_u_t *) &r10);
		break;
	    case PAL_VM_PAGE_SIZE:
		status = ia64_pal_vm_page_size(&r9,&r10);
		break;
	    case PAL_DEBUG_INFO:
		status = ia64_pal_debug_info(&r9,&r10);
		break;
	    case PAL_CACHE_SUMMARY:
		status = ia64_pal_cache_summary(&r9,&r10);
		break;
	    case PAL_VM_SUMMARY:
		if (VMX_DOMAIN(current)) {
			/* Real values, adjusted to what the VMX guest is
			 * actually given (TR counts, rid size, va width). */
			pal_vm_info_1_u_t v1;
			pal_vm_info_2_u_t v2;
			status = ia64_pal_vm_summary((pal_vm_info_1_u_t *)&v1,
			                             (pal_vm_info_2_u_t *)&v2);
			v1.pal_vm_info_1_s.max_itr_entry = NITRS - 1;
			v1.pal_vm_info_1_s.max_dtr_entry = NDTRS - 1;
			v2.pal_vm_info_2_s.impl_va_msb -= 1;
			v2.pal_vm_info_2_s.rid_size =
				current->domain->arch.rid_bits;
			r9 = v1.pvi1_val;
			r10 = v2.pvi2_val;
		} else {
			/* Use xen-specific values.
			   hash_tag_id is somewhat random! */
			static const pal_vm_info_1_u_t v1 =
				{.pal_vm_info_1_s =
				 { .vw = 1,
				   .phys_add_size = 44,
				   .key_size = 16,
				   .max_pkr = 15,
				   .hash_tag_id = 0x30,
				   .max_dtr_entry = NDTRS - 1,
				   .max_itr_entry = NITRS - 1,
				   .max_unique_tcs = 3,
				   .num_tc_levels = 2
				 }};
			pal_vm_info_2_u_t v2;
			v2.pvi2_val = 0;
			v2.pal_vm_info_2_s.rid_size =
				current->domain->arch.rid_bits;
			v2.pal_vm_info_2_s.impl_va_msb = 50;
			r9 = v1.pvi1_val;
			r10 = v2.pvi2_val;
			status = PAL_STATUS_SUCCESS;
		}
		break;
	    case PAL_VM_INFO:
		if (VMX_DOMAIN(current)) {
			/* VMX guests see the real TC info. */
			status = ia64_pal_vm_info(in1, in2,
			                          (pal_tc_info_u_t *)&r9, &r10);
			break;
		}
		if (in1 == 0 && in2 == 2) {
			/* Level 1: VHPT  */
			const pal_tc_info_u_t v =
				{ .pal_tc_info_s = {.num_sets = 128,
				                    .associativity = 1,
				                    .num_entries = 128,
				                    .pf = 1,
				                    .unified = 1,
				                    .reduce_tr = 0,
				                    .reserved = 0}};
			r9 = v.pti_val;
			/* Only support PAGE_SIZE tc.  */
			r10 = PAGE_SIZE;
			status = PAL_STATUS_SUCCESS;
		}
		else if (in1 == 1 && (in2 == 1 || in2 == 2)) {
			/* Level 2: itlb/dtlb, 1 entry.  */
			const pal_tc_info_u_t v =
				{ .pal_tc_info_s = {.num_sets = 1,
				                    .associativity = 1,
				                    .num_entries = 1,
				                    .pf = 1,
				                    .unified = 0,
				                    .reduce_tr = 0,
				                    .reserved = 0}};
			r9 = v.pti_val;
			/* Only support PAGE_SIZE tc.  */
			r10 = PAGE_SIZE;
			status = PAL_STATUS_SUCCESS;
		} else
			status = PAL_STATUS_EINVAL;
		break;
	    case PAL_RSE_INFO:
		status = ia64_pal_rse_info(&r9, (pal_hints_u_t *)&r10);
		break;
	    case PAL_REGISTER_INFO:
		status = ia64_pal_register_info(in1, &r9, &r10);
		break;
	    case PAL_CACHE_FLUSH:
		if (in3 != 0) /* Initially progress_indicator must be 0 */
			panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
			             "progress_indicator=%lx", in3);

		/* Always call Host Pal in int=0 */
		in2 &= ~PAL_CACHE_FLUSH_CHK_INTRS;

		if (in1 != PAL_CACHE_TYPE_COHERENT) {
			/* Non-coherency flushes must run on every CPU;
			 * broadcast before flushing locally. */
			struct cache_flush_args args = {
				.cache_type = in1,
				.operation = in2,
				.progress = 0,
				.status = 0
			};
			smp_call_function(remote_pal_cache_flush,
			                  (void *)&args, 1, 1);
			if (args.status != 0)
				panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
				             "remote status %lx", args.status);
		}

		/*
		 * Call Host PAL cache flush
		 * Clear psr.ic when call PAL_CACHE_FLUSH
		 */
		r10 = in3;
		local_irq_save(flags);
		processor = current->processor;
		status = ia64_pal_cache_flush(in1, in2, &r10, &r9);
		local_irq_restore(flags);

		if (status != 0)
			panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
			             "status %lx", status);

		if (in1 == PAL_CACHE_TYPE_COHERENT) {
			/* Mark every CPU except this one as needing a
			 * coherency flush before running this vcpu again. */
			cpus_setall(current->arch.cache_coherent_map);
			cpu_clear(processor, current->arch.cache_coherent_map);
			cpus_setall(cpu_cache_coherent_map);
			cpu_clear(processor, cpu_cache_coherent_map);
		}
		break;
	    case PAL_PERF_MON_INFO:
		{
			unsigned long pm_buffer[16];
			status = ia64_pal_perf_mon_info(
					pm_buffer,
					(pal_perf_mon_info_u_t *) &r9);
			/* NOTE(review): both failure paths below spin in
			 * while(1) printk loops and never reach the break —
			 * presumably a deliberate hang-with-diagnostics;
			 * confirm before relying on the break statements. */
			if (status != 0) {
				while(1)
				printk("PAL_PERF_MON_INFO fails ret=%ld\n", status);
				break;
			}
			if (copy_to_user((void __user *)in1,pm_buffer,128)) {
				while(1)
				printk("xen_pal_emulator: PAL_PERF_MON_INFO "
				       "can't copy to user!!!!\n");
				status = PAL_STATUS_UNIMPLEMENTED;
				break;
			}
		}
		break;
	    case PAL_CACHE_INFO:
		{
			pal_cache_config_info_t ci;
			status = ia64_pal_cache_config_info(in1,in2,&ci);
			if (status != 0)
				break;
			r9 = ci.pcci_info_1.pcci1_data;
			r10 = ci.pcci_info_2.pcci2_data;
		}
		break;
	    case PAL_VM_TR_READ:	/* FIXME: vcpu_get_tr?? */
		printk("%s: PAL_VM_TR_READ unimplmented, ignored\n", __func__);
		break;
	    case PAL_HALT_INFO:
		{
			/* 1000 cycles to enter/leave low power state,
			   consumes 10 mW, implemented and cache/TLB coherent.  */
			unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
				| (1UL << 61) | (1UL << 60);
			if (copy_to_user ((void *)in1, &res, sizeof (res)))
				status = PAL_STATUS_EINVAL;
			else
				status = PAL_STATUS_SUCCESS;
		}
		break;
	    case PAL_HALT:
		if (current->domain == dom0) {
			/* dom0 halting means the whole machine goes down. */
			printk ("Domain0 halts the machine\n");
			console_start_sync();
			(*efi.reset_system)(EFI_RESET_SHUTDOWN,0,0,NULL);
		} else {
			/* An ordinary domain just puts this vcpu to sleep. */
			set_bit(_VPF_down, &current->pause_flags);
			vcpu_sleep_nosync(current);
			status = PAL_STATUS_SUCCESS;
		}
		break;
	    case PAL_HALT_LIGHT:
		if (VMX_DOMAIN(current)) {
			/* Called by VTI.  */
			if (!is_unmasked_irq(current)) {
				do_sched_op_compat(SCHEDOP_block, 0);
				do_softirq();
			}
			status = PAL_STATUS_SUCCESS;
		}
		break;
	    case PAL_PLATFORM_ADDR:
		if (VMX_DOMAIN(current))
			status = PAL_STATUS_SUCCESS;
		break;
	    case PAL_FIXED_ADDR:
		status = PAL_STATUS_SUCCESS;
		r9 = current->vcpu_id;
		break;
	    case PAL_LOGICAL_TO_PHYSICAL:
		/* Optional, no need to complain about being unimplemented */
		break;
	    default:
		printk("%s: Unimplemented PAL Call %lu\n", __func__, index);
		break;
	}
	return ((struct ia64_pal_retval) {status, r9, r10, r11});
}
// given a current domain (virtual or metaphysical) address, return the virtual address
/*
 * Translate a guest address to a Xen-virtual address and take a reference
 * on the backing page.  On success *page holds the referenced page (the
 * caller must put_page() it) and the Xen-virtual address is returned.
 * On failure *fault is set and 0 is returned with no reference held.
 */
static unsigned long
efi_translate_domain_addr(unsigned long domain_addr, IA64FAULT *fault,
			  struct page_info** page)
{
	struct vcpu *v = current;
	unsigned long mpaddr = domain_addr;
	unsigned long virt;
	*fault = IA64_NO_FAULT;

 again:
	/* In EFI virtual mode the guest passes a virtual address; run it
	 * through the vcpu's TLB first to get the metaphysical address. */
	if (v->domain->arch.sal_data->efi_virt_mode) {
		*fault = vcpu_tpa(v, domain_addr, &mpaddr);
		if (*fault != IA64_NO_FAULT) return 0;
	}

	virt = (unsigned long)domain_mpa_to_imva(v->domain, mpaddr);
	*page = virt_to_page(virt);
	/* get_page() can fail transiently while the page is in flux; retry
	 * the whole translation unless the page genuinely belongs to
	 * another domain. */
	if (get_page(*page, current->domain) == 0) {
		if (page_get_owner(*page) != current->domain) {
			// which code is appropriate?
			*fault = IA64_FAULT;
			return 0;
		}
		goto again;
	}

	return virt;
}
/*
 * Emulate the EFI GetTime runtime service for a guest.
 *
 * tv_addr/tc_addr are guest addresses of the efi_time_t and (optional)
 * efi_time_cap_t buffers.  The real EFI time is fetched, then shifted by
 * the domain's time_offset_seconds so each domain can keep its own wall
 * clock.  On translation failure *fault is set and the (zero) status is
 * returned.
 */
static efi_status_t
efi_emulate_get_time(
	unsigned long tv_addr, unsigned long tc_addr,
	IA64FAULT *fault)
{
	unsigned long tv, tc = 0;
	struct page_info *tv_page = NULL;
	struct page_info *tc_page = NULL;
	efi_status_t status = 0;
	efi_time_t *tvp;
	struct tm timeptr;
	unsigned long xtimesec;

	tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
	if (*fault != IA64_NO_FAULT)
		goto errout;
	/* The capabilities pointer is optional per the EFI spec. */
	if (tc_addr) {
		tc = efi_translate_domain_addr(tc_addr, fault, &tc_page);
		if (*fault != IA64_NO_FAULT)
			goto errout;
	}

	/* EFI runtime time services are not reentrant; serialize them. */
	spin_lock(&efi_time_services_lock);
	/* NOTE(review): status is not checked before the offset math below;
	 * on EFI failure the adjusted fields are garbage, though the guest
	 * presumably discards them after seeing the bad status — confirm. */
	status = (*efi.get_time)((efi_time_t *) tv, (efi_time_cap_t *) tc);
	tvp = (efi_time_t *)tv;
	/* Apply the per-domain clock offset: EFI time -> seconds ->
	 * +offset -> broken-down time -> back into the guest's buffer. */
	xtimesec = mktime(tvp->year, tvp->month, tvp->day, tvp->hour,
	                  tvp->minute, tvp->second);
	xtimesec += current->domain->time_offset_seconds;
	timeptr = gmtime(xtimesec);
	tvp->second = timeptr.tm_sec;
	tvp->minute = timeptr.tm_min;
	tvp->hour   = timeptr.tm_hour;
	tvp->day    = timeptr.tm_mday;
	tvp->month  = timeptr.tm_mon + 1;
	tvp->year   = timeptr.tm_year + 1900;
	spin_unlock(&efi_time_services_lock);

errout:
	/* Drop the page references taken by efi_translate_domain_addr(). */
	if (tc_page != NULL)
		put_page(tc_page);
	if (tv_page != NULL)
		put_page(tv_page);

	return status;
}
936 static efi_status_t
937 efi_emulate_set_time(
938 unsigned long tv_addr, IA64FAULT *fault)
939 {
940 unsigned long tv;
941 struct page_info *tv_page = NULL;
942 efi_status_t status = 0;
944 if (current->domain != dom0)
945 return EFI_UNSUPPORTED;
947 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
948 if (*fault != IA64_NO_FAULT)
949 goto errout;
951 spin_lock(&efi_time_services_lock);
952 status = (*efi.set_time)((efi_time_t *)tv);
953 spin_unlock(&efi_time_services_lock);
955 errout:
956 if (tv_page != NULL)
957 put_page(tv_page);
959 return status;
960 }
962 static efi_status_t
963 efi_emulate_get_wakeup_time(
964 unsigned long e_addr, unsigned long p_addr,
965 unsigned long tv_addr, IA64FAULT *fault)
966 {
967 unsigned long enabled, pending, tv;
968 struct page_info *e_page = NULL, *p_page = NULL,
969 *tv_page = NULL;
970 efi_status_t status = 0;
972 if (current->domain != dom0)
973 return EFI_UNSUPPORTED;
975 if (!e_addr || !p_addr || !tv_addr)
976 return EFI_INVALID_PARAMETER;
978 enabled = efi_translate_domain_addr(e_addr, fault, &e_page);
979 if (*fault != IA64_NO_FAULT)
980 goto errout;
981 pending = efi_translate_domain_addr(p_addr, fault, &p_page);
982 if (*fault != IA64_NO_FAULT)
983 goto errout;
984 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
985 if (*fault != IA64_NO_FAULT)
986 goto errout;
988 spin_lock(&efi_time_services_lock);
989 status = (*efi.get_wakeup_time)((efi_bool_t *)enabled,
990 (efi_bool_t *)pending,
991 (efi_time_t *)tv);
992 spin_unlock(&efi_time_services_lock);
994 errout:
995 if (e_page != NULL)
996 put_page(e_page);
997 if (p_page != NULL)
998 put_page(p_page);
999 if (tv_page != NULL)
1000 put_page(tv_page);
1002 return status;
1005 static efi_status_t
1006 efi_emulate_set_wakeup_time(
1007 unsigned long enabled, unsigned long tv_addr,
1008 IA64FAULT *fault)
1010 unsigned long tv = 0;
1011 struct page_info *tv_page = NULL;
1012 efi_status_t status = 0;
1014 if (current->domain != dom0)
1015 return EFI_UNSUPPORTED;
1017 if (tv_addr) {
1018 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
1019 if (*fault != IA64_NO_FAULT)
1020 goto errout;
1023 spin_lock(&efi_time_services_lock);
1024 status = (*efi.set_wakeup_time)((efi_bool_t)enabled,
1025 (efi_time_t *)tv);
1026 spin_unlock(&efi_time_services_lock);
1028 errout:
1029 if (tv_page != NULL)
1030 put_page(tv_page);
1032 return status;
1035 static efi_status_t
1036 efi_emulate_get_variable(
1037 unsigned long name_addr, unsigned long vendor_addr,
1038 unsigned long attr_addr, unsigned long data_size_addr,
1039 unsigned long data_addr, IA64FAULT *fault)
1041 unsigned long name, vendor, attr = 0, data_size, data;
1042 struct page_info *name_page = NULL, *vendor_page = NULL,
1043 *attr_page = NULL, *data_size_page = NULL,
1044 *data_page = NULL;
1045 efi_status_t status = 0;
1047 if (current->domain != dom0)
1048 return EFI_UNSUPPORTED;
1050 name = efi_translate_domain_addr(name_addr, fault, &name_page);
1051 if (*fault != IA64_NO_FAULT)
1052 goto errout;
1053 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
1054 if (*fault != IA64_NO_FAULT)
1055 goto errout;
1056 data_size = efi_translate_domain_addr(data_size_addr, fault,
1057 &data_size_page);
1058 if (*fault != IA64_NO_FAULT)
1059 goto errout;
1060 data = efi_translate_domain_addr(data_addr, fault, &data_page);
1061 if (*fault != IA64_NO_FAULT)
1062 goto errout;
1063 if (attr_addr) {
1064 attr = efi_translate_domain_addr(attr_addr, fault, &attr_page);
1065 if (*fault != IA64_NO_FAULT)
1066 goto errout;
1069 status = (*efi.get_variable)((efi_char16_t *)name,
1070 (efi_guid_t *)vendor,
1071 (u32 *)attr,
1072 (unsigned long *)data_size,
1073 (void *)data);
1075 errout:
1076 if (name_page != NULL)
1077 put_page(name_page);
1078 if (vendor_page != NULL)
1079 put_page(vendor_page);
1080 if (attr_page != NULL)
1081 put_page(attr_page);
1082 if (data_size_page != NULL)
1083 put_page(data_size_page);
1084 if (data_page != NULL)
1085 put_page(data_page);
1087 return status;
1090 static efi_status_t
1091 efi_emulate_get_next_variable(
1092 unsigned long name_size_addr, unsigned long name_addr,
1093 unsigned long vendor_addr, IA64FAULT *fault)
1095 unsigned long name_size, name, vendor;
1096 struct page_info *name_size_page = NULL, *name_page = NULL,
1097 *vendor_page = NULL;
1098 efi_status_t status = 0;
1100 if (current->domain != dom0)
1101 return EFI_UNSUPPORTED;
1103 name_size = efi_translate_domain_addr(name_size_addr, fault,
1104 &name_size_page);
1105 if (*fault != IA64_NO_FAULT)
1106 goto errout;
1107 name = efi_translate_domain_addr(name_addr, fault, &name_page);
1108 if (*fault != IA64_NO_FAULT)
1109 goto errout;
1110 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
1111 if (*fault != IA64_NO_FAULT)
1112 goto errout;
1114 status = (*efi.get_next_variable)((unsigned long *)name_size,
1115 (efi_char16_t *)name,
1116 (efi_guid_t *)vendor);
1118 errout:
1119 if (name_size_page != NULL)
1120 put_page(name_size_page);
1121 if (name_page != NULL)
1122 put_page(name_page);
1123 if (vendor_page != NULL)
1124 put_page(vendor_page);
1126 return status;
/*
 * Emulate the EFI SetVariable() runtime service on behalf of dom0.
 * name/vendor/data are guest (metaphysical) addresses and are
 * translated to Xen virtual addresses; attr and data_size are passed
 * through by value.  On a translation failure *fault is set
 * (!= IA64_NO_FAULT) and status 0 is returned — check *fault first.
 */
1129 static efi_status_t
1130 efi_emulate_set_variable(
1131 unsigned long name_addr, unsigned long vendor_addr,
1132 unsigned long attr, unsigned long data_size,
1133 unsigned long data_addr, IA64FAULT *fault)
1135 unsigned long name, vendor, data;
1136 struct page_info *name_page = NULL, *vendor_page = NULL,
1137 *data_page = NULL;
1138 efi_status_t status = 0;
/* Only privileged dom0 may modify physical EFI variables. */
1140 if (current->domain != dom0)
1141 return EFI_UNSUPPORTED;
/*
 * Translate the guest buffers; translation pins the backing pages,
 * which are released in the errout path below.
 */
1143 name = efi_translate_domain_addr(name_addr, fault, &name_page);
1144 if (*fault != IA64_NO_FAULT)
1145 goto errout;
1146 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
1147 if (*fault != IA64_NO_FAULT)
1148 goto errout;
1149 data = efi_translate_domain_addr(data_addr, fault, &data_page);
1150 if (*fault != IA64_NO_FAULT)
1151 goto errout;
/* Forward to the firmware's real SetVariable() entry point. */
1153 status = (*efi.set_variable)((efi_char16_t *)name,
1154 (efi_guid_t *)vendor,
1155 attr,
1156 data_size,
1157 (void *)data);
1159 errout:
/* Drop the page references taken by efi_translate_domain_addr(). */
1160 if (name_page != NULL)
1161 put_page(name_page);
1162 if (vendor_page != NULL)
1163 put_page(vendor_page);
1164 if (data_page != NULL)
1165 put_page(data_page);
1167 return status;
/*
 * Emulate the EFI SetVirtualAddressMap() runtime service for the
 * current domain.  Rather than remapping real firmware, this walks the
 * guest-supplied memory map looking for the EFI_PAL_CODE descriptor
 * (which, for the Xen-built dom_fw, covers the hypercall stub area)
 * and rewrites each runtime-service function pointer so it points at
 * the corresponding FW_HYPERCALL_*_INDEX stub at its new virtual
 * address.  The FPSWA entry/patch pointers are rewritten the same way.
 * May only be applied once per domain (efi_virt_mode latch).
 */
1170 static efi_status_t
1171 efi_emulate_set_virtual_address_map(
1172 unsigned long memory_map_size, unsigned long descriptor_size,
1173 u32 descriptor_version, efi_memory_desc_t *virtual_map)
1175 void *efi_map_start, *efi_map_end, *p;
1176 efi_memory_desc_t entry, *md = &entry;
1177 u64 efi_desc_size;
1179 unsigned long *vfn;
1180 struct domain *d = current->domain;
1181 efi_runtime_services_t *efi_runtime = d->arch.efi_runtime;
1182 fpswa_interface_t *fpswa_inf = d->arch.fpswa_inf;
/* Reject maps built against a different descriptor layout. */
1184 if (descriptor_version != EFI_MEMDESC_VERSION) {
1185 printk ("efi_emulate_set_virtual_address_map: memory "
1186 "descriptor version unmatched (%d vs %d)\n",
1187 (int)descriptor_version, EFI_MEMDESC_VERSION);
1188 return EFI_INVALID_PARAMETER;
1191 if (descriptor_size != sizeof(efi_memory_desc_t)) {
1192 printk ("efi_emulate_set_virtual_address_map: memory descriptor size unmatched\n");
1193 return EFI_INVALID_PARAMETER;
/* SetVirtualAddressMap() may only be invoked once per domain. */
1196 if (d->arch.sal_data->efi_virt_mode)
1197 return EFI_UNSUPPORTED;
1199 efi_map_start = virtual_map;
1200 efi_map_end = efi_map_start + memory_map_size;
1201 efi_desc_size = sizeof(efi_memory_desc_t);
/*
 * Copy each descriptor from guest memory into a local buffer before
 * inspecting it, so the guest cannot race with our reads.
 */
1203 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
1204 if (copy_from_user(&entry, p, sizeof(efi_memory_desc_t))) {
1205 printk ("efi_emulate_set_virtual_address_map: copy_from_user() fault. addr=0x%p\n", p);
1206 return EFI_UNSUPPORTED;
1209 /* skip over non-PAL_CODE memory descriptors; EFI_RUNTIME is included in PAL_CODE. */
1210 if (md->type != EFI_PAL_CODE)
1211 continue;
/*
 * Patch one runtime-service pointer in the domain's firmware image:
 * write the stub's new virtual address (index * 16 bytes past the
 * descriptor's virt_addr) plus a zero gp into the function
 * descriptor located at metaphysical address 'tgt'.
 */
1213 #define EFI_HYPERCALL_PATCH_TO_VIRT(tgt,call) \
1214 do { \
1215 vfn = (unsigned long *) domain_mpa_to_imva(d, tgt); \
1216 *vfn++ = FW_HYPERCALL_##call##_INDEX * 16UL + md->virt_addr; \
1217 *vfn++ = 0; \
1218 } while (0)
1220 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_time,EFI_GET_TIME);
1221 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_time,EFI_SET_TIME);
1222 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_wakeup_time,EFI_GET_WAKEUP_TIME);
1223 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_wakeup_time,EFI_SET_WAKEUP_TIME);
1224 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_virtual_address_map,EFI_SET_VIRTUAL_ADDRESS_MAP);
1225 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_variable,EFI_GET_VARIABLE);
1226 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_variable,EFI_GET_NEXT_VARIABLE);
1227 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_variable,EFI_SET_VARIABLE);
1228 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_high_mono_count,EFI_GET_NEXT_HIGH_MONO_COUNT);
1229 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->reset_system,EFI_RESET_SYSTEM);
/* Patch the FPSWA function descriptor and entry pointer likewise. */
1231 vfn = (unsigned long *) domain_mpa_to_imva(d, (unsigned long) fpswa_inf->fpswa);
1232 *vfn++ = FW_HYPERCALL_FPSWA_PATCH_INDEX * 16UL + md->virt_addr;
1233 *vfn = 0;
1234 fpswa_inf->fpswa = (void *) (FW_HYPERCALL_FPSWA_ENTRY_INDEX * 16UL + md->virt_addr);
/* Only one PAL_CODE descriptor is expected; done after patching it. */
1235 break;
1238 /* The virtual address map has been applied. */
1239 d->arch.sal_data->efi_virt_mode = 1;
1241 return EFI_SUCCESS;
/*
 * Top-level dispatcher for EFI runtime-service hypercalls.  The
 * firmware stubs patched into the guest place the hypercall index in
 * r2 and the service arguments in stacked registers r32..r36; each
 * case unpacks those and calls the matching efi_emulate_* helper.
 * *fault is initialized to IA64_NO_FAULT and may be overwritten by a
 * helper on an address-translation failure.  Returns the EFI status
 * to be handed back to the guest.
 */
1244 efi_status_t
1245 efi_emulator (struct pt_regs *regs, IA64FAULT *fault)
1247 struct vcpu *v = current;
1248 efi_status_t status;
1250 *fault = IA64_NO_FAULT;
1252 switch (regs->r2) {
1253 case FW_HYPERCALL_EFI_RESET_SYSTEM:
1255 u8 reason;
1256 unsigned long val = vcpu_get_gr(v,32);
/* Map the EFI reset type onto a Xen shutdown reason. */
1257 switch (val)
1259 case EFI_RESET_SHUTDOWN:
1260 reason = SHUTDOWN_poweroff;
1261 break;
1262 case EFI_RESET_COLD:
1263 case EFI_RESET_WARM:
1264 default:
1265 reason = SHUTDOWN_reboot;
1266 break;
1268 domain_shutdown (current->domain, reason);
/* ResetSystem() is not supposed to return; report failure if it does. */
1270 status = EFI_UNSUPPORTED;
1271 break;
1272 case FW_HYPERCALL_EFI_GET_TIME:
1273 status = efi_emulate_get_time (
1274 vcpu_get_gr(v,32),
1275 vcpu_get_gr(v,33),
1276 fault);
1277 break;
1278 case FW_HYPERCALL_EFI_SET_TIME:
1279 status = efi_emulate_set_time (
1280 vcpu_get_gr(v,32),
1281 fault);
1282 break;
1283 case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
1284 status = efi_emulate_get_wakeup_time (
1285 vcpu_get_gr(v,32),
1286 vcpu_get_gr(v,33),
1287 vcpu_get_gr(v,34),
1288 fault);
1289 break;
1290 case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
1291 status = efi_emulate_set_wakeup_time (
1292 vcpu_get_gr(v,32),
1293 vcpu_get_gr(v,33),
1294 fault);
1295 break;
1296 case FW_HYPERCALL_EFI_GET_VARIABLE:
1297 status = efi_emulate_get_variable (
1298 vcpu_get_gr(v,32),
1299 vcpu_get_gr(v,33),
1300 vcpu_get_gr(v,34),
1301 vcpu_get_gr(v,35),
1302 vcpu_get_gr(v,36),
1303 fault);
1304 break;
1305 case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
1306 status = efi_emulate_get_next_variable (
1307 vcpu_get_gr(v,32),
1308 vcpu_get_gr(v,33),
1309 vcpu_get_gr(v,34),
1310 fault);
1311 break;
1312 case FW_HYPERCALL_EFI_SET_VARIABLE:
1313 status = efi_emulate_set_variable (
1314 vcpu_get_gr(v,32),
1315 vcpu_get_gr(v,33),
1316 vcpu_get_gr(v,34),
1317 vcpu_get_gr(v,35),
1318 vcpu_get_gr(v,36),
1319 fault);
1320 break;
1321 case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
1322 status = efi_emulate_set_virtual_address_map (
1323 vcpu_get_gr(v,32),
1324 vcpu_get_gr(v,33),
1325 (u32) vcpu_get_gr(v,34),
1326 (efi_memory_desc_t *) vcpu_get_gr(v,35));
1327 break;
1328 case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
1329 // FIXME: need fixes in efi.h from 2.6.9
1330 status = EFI_UNSUPPORTED;
1331 break;
1332 default:
1333 printk("unknown ia64 fw hypercall %lx\n", regs->r2);
1334 status = EFI_UNSUPPORTED;
1337 return status;
/*
 * Handle a Simulated System Call (SSC) trap from a guest running on
 * the HP Ski simulator.  SSCs provide console and disk I/O services;
 * on real hardware (running_on_sim == 0) the I/O variants are stubbed
 * out with a warning.  Guest arguments arrive in stacked registers
 * r32..r35; results are returned in r8 and the guest iip is advanced
 * past the trapping instruction before returning.
 */
1340 void
1341 do_ssc(unsigned long ssc, struct pt_regs *regs)
1343 unsigned long arg0, arg1, arg2, arg3, retval;
1344 char buf[2];
/* Static per-call bookkeeping shared across invocations — known broken. */
1345 /**/ static int last_fd, last_count; // FIXME FIXME FIXME
1346 /**/ // BROKEN FOR MULTIPLE DOMAINS & SMP
1347 /**/ struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
1349 arg0 = vcpu_get_gr(current,32);
1350 switch(ssc) {
1351 case SSC_PUTCHAR:
/* Emit a single character via Xen's console. */
1352 buf[0] = arg0;
1353 buf[1] = '\0';
1354 printk(buf);
1355 break;
1356 case SSC_GETCHAR:
/* Pass the read straight through to the simulator. */
1357 retval = ia64_ssc(0,0,0,0,ssc);
1358 vcpu_set_gr(current,8,retval,0);
1359 break;
1360 case SSC_WAIT_COMPLETION:
1361 if (arg0) { // metaphysical address
/* Translate the guest's status buffer and fake the completion
 * locally using the count recorded by the last SSC_READ/WRITE. */
1363 arg0 = translate_domain_mpaddr(arg0, NULL);
1364 /**/ stat = (struct ssc_disk_stat *)__va(arg0);
1365 ///**/ if (stat->fd == last_fd) stat->count = last_count;
1366 /**/ stat->count = last_count;
1367 //if (last_count >= PAGE_SIZE) printk("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
1368 ///**/ retval = ia64_ssc(arg0,0,0,0,ssc);
1369 /**/ retval = 0;
1371 else retval = -1L;
1372 vcpu_set_gr(current,8,retval,0);
1373 break;
1374 case SSC_OPEN:
1375 arg1 = vcpu_get_gr(current,33); // access rights
/* Disk open only works under the simulator; zero arg0 forces -1. */
1376 if (!running_on_sim) {
1377 printk("SSC_OPEN, not implemented on hardware. (ignoring...)\n");
1378 arg0 = 0;
1380 if (arg0) { // metaphysical address
1381 arg0 = translate_domain_mpaddr(arg0, NULL);
1382 retval = ia64_ssc(arg0,arg1,0,0,ssc);
1384 else retval = -1L;
1385 vcpu_set_gr(current,8,retval,0);
1386 break;
1387 case SSC_WRITE:
1388 case SSC_READ:
1389 //if (ssc == SSC_WRITE) printk("DOING AN SSC_WRITE\n");
1390 arg1 = vcpu_get_gr(current,33);
1391 arg2 = vcpu_get_gr(current,34);
1392 arg3 = vcpu_get_gr(current,35);
1393 if (arg2) { // metaphysical address of descriptor
1394 struct ssc_disk_req *req;
1395 unsigned long mpaddr;
1396 long len;
1398 arg2 = translate_domain_mpaddr(arg2, NULL);
1399 req = (struct ssc_disk_req *) __va(arg2);
1400 req->len &= 0xffffffffL; // avoid strange bug
1401 len = req->len;
1402 /**/ last_fd = arg1;
1403 /**/ last_count = len;
1404 mpaddr = req->addr;
1405 //if (last_count >= PAGE_SIZE) printk("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
1406 retval = 0;
/* The guest buffer may span page boundaries, and each guest page
 * maps to a possibly discontiguous machine page — split the I/O
 * into per-page requests, translating each piece separately. */
1407 if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
1408 // do partial page first
1409 req->addr = translate_domain_mpaddr(mpaddr, NULL);
1410 req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
1411 len -= req->len; mpaddr += req->len;
1412 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1413 arg3 += req->len; // file offset
/* Wait synchronously so req can be reused for the next piece. */
1414 /**/ last_stat.fd = last_fd;
1415 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
1416 //if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
1418 if (retval >= 0) while (len > 0) {
1419 req->addr = translate_domain_mpaddr(mpaddr, NULL);
1420 req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
1421 len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
1422 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1423 arg3 += req->len; // file offset
1424 // TEMP REMOVED AGAIN arg3 += req->len; // file offset
1425 /**/ last_stat.fd = last_fd;
1426 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
1427 //if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
1429 // set it back to the original value
1430 req->len = last_count;
1432 else retval = -1L;
1433 vcpu_set_gr(current,8,retval,0);
1434 //if (last_count >= PAGE_SIZE) printk("retval=%x\n",retval);
1435 break;
1436 case SSC_CONNECT_INTERRUPT:
1437 arg1 = vcpu_get_gr(current,33);
1438 arg2 = vcpu_get_gr(current,34);
1439 arg3 = vcpu_get_gr(current,35);
1440 if (!running_on_sim) {
1441 printk("SSC_CONNECT_INTERRUPT, not implemented on hardware. (ignoring...)\n");
1442 break;
1444 (void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1445 break;
1446 case SSC_NETDEV_PROBE:
/* No simulated network device — always report failure. */
1447 vcpu_set_gr(current,8,-1L,0);
1448 break;
1449 default:
1450 panic_domain(regs,
1451 "%s: bad ssc code %lx, iip=0x%lx, b0=0x%lx\n",
1452 __func__, ssc, regs->cr_iip, regs->b0);
1453 break;
/* Skip past the trapping instruction so the guest resumes after it. */
1455 vcpu_increment_iip(current);