ia64/xen-unstable

view xen/arch/ia64/xen/fw_emul.c @ 13958:b0aeca575dfb

[IA64] vcpu hot-plug/remove for VTi

Return to SAL added for VTi by adding a new SAL (OEM defined) function.
Using this patch I was able to hot-add/hot-remove vcpus under Linux
(in fact, virtualized hot-plug).

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author awilliam@xenbuild2.aw
date Fri Feb 16 15:49:05 2007 -0700 (2007-02-16)
parents 2b3dd681dbce
children 347fb33790bd
line source
1 /*
2 * fw_emul.c:
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 */
18 #include <xen/config.h>
19 #include <asm/system.h>
20 #include <asm/pgalloc.h>
22 #include <linux/efi.h>
23 #include <asm/pal.h>
24 #include <asm/sal.h>
25 #include <asm/xenmca.h>
27 #include <public/sched.h>
28 #include "hpsim_ssc.h"
29 #include <asm/vcpu.h>
30 #include <asm/vmx_vcpu.h>
31 #include <asm/dom_fw.h>
32 #include <asm/uaccess.h>
33 #include <xen/console.h>
34 #include <xen/hypercall.h>
35 #include <xen/softirq.h>
36 #include <xen/time.h>
38 static DEFINE_SPINLOCK(efi_time_services_lock);
40 extern unsigned long running_on_sim;
42 struct sal_mc_params {
43 u64 param_type;
44 u64 i_or_m;
45 u64 i_or_m_val;
46 u64 timeout;
47 u64 rz_always;
48 } sal_mc_params[SAL_MC_PARAM_CPE_INT + 1];
50 struct sal_vectors {
51 u64 vector_type;
52 u64 handler_addr1;
53 u64 gp1;
54 u64 handler_len1;
55 u64 handler_addr2;
56 u64 gp2;
57 u64 handler_len2;
58 } sal_vectors[SAL_VECTOR_OS_BOOT_RENDEZ + 1];
60 struct smp_call_args_t {
61 u64 type;
62 u64 ret;
63 u64 target;
64 struct domain *domain;
65 int corrected;
66 int status;
67 void *data;
68 };
70 extern sal_log_record_header_t *sal_record;
71 DEFINE_SPINLOCK(sal_record_lock);
73 extern spinlock_t sal_queue_lock;
75 #define IA64_SAL_NO_INFORMATION_AVAILABLE -5
77 #if defined(IA64_SAL_DEBUG_INFO)
78 static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
80 # define IA64_SAL_DEBUG(fmt...) printk("sal_emulator: " fmt)
81 #else
82 # define IA64_SAL_DEBUG(fmt...)
83 #endif
85 void get_state_info_on(void *data) {
86 struct smp_call_args_t *arg = data;
87 int flags;
89 spin_lock_irqsave(&sal_record_lock, flags);
90 memset(sal_record, 0, ia64_sal_get_state_info_size(arg->type));
91 arg->ret = ia64_sal_get_state_info(arg->type, (u64 *)sal_record);
92 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) on CPU#%d returns %ld.\n",
93 rec_name[arg->type], smp_processor_id(), arg->ret);
94 if (arg->corrected) {
95 sal_record->severity = sal_log_severity_corrected;
96 IA64_SAL_DEBUG("%s: IA64_SAL_CLEAR_STATE_INFO(SAL_INFO_TYPE_MCA)"
97 " force\n", __FUNCTION__);
98 }
99 if (arg->ret > 0) {
100 /*
101 * Save current->domain and set to local(caller) domain for
102 * xencomm_paddr_to_maddr() which calculates maddr from
103 * paddr using mpa value of current->domain.
104 */
105 struct domain *save;
106 save = current->domain;
107 current->domain = arg->domain;
108 if (xencomm_copy_to_guest((void*)arg->target,
109 sal_record, arg->ret, 0)) {
110 printk("SAL_GET_STATE_INFO can't copy to user!!!!\n");
111 arg->status = IA64_SAL_NO_INFORMATION_AVAILABLE;
112 arg->ret = 0;
113 }
114 /* Restore current->domain to saved value. */
115 current->domain = save;
116 }
117 spin_unlock_irqrestore(&sal_record_lock, flags);
118 }
120 void clear_state_info_on(void *data) {
121 struct smp_call_args_t *arg = data;
123 arg->ret = ia64_sal_clear_state_info(arg->type);
124 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) on CPU#%d returns %ld.\n",
125 rec_name[arg->type], smp_processor_id(), arg->ret);
127 }
129 struct sal_ret_values
130 sal_emulator (long index, unsigned long in1, unsigned long in2,
131 unsigned long in3, unsigned long in4, unsigned long in5,
132 unsigned long in6, unsigned long in7)
133 {
134 unsigned long r9 = 0;
135 unsigned long r10 = 0;
136 long r11 = 0;
137 long status;
139 status = 0;
140 switch (index) {
141 case SAL_FREQ_BASE:
142 if (!running_on_sim)
143 status = ia64_sal_freq_base(in1,&r9,&r10);
144 else switch (in1) {
145 case SAL_FREQ_BASE_PLATFORM:
146 r9 = 200000000;
147 break;
149 case SAL_FREQ_BASE_INTERVAL_TIMER:
150 r9 = 700000000;
151 break;
153 case SAL_FREQ_BASE_REALTIME_CLOCK:
154 r9 = 1;
155 break;
157 default:
158 status = -1;
159 break;
160 }
161 break;
162 case SAL_PCI_CONFIG_READ:
163 if (current->domain == dom0) {
164 u64 value;
165 // note that args 2&3 are swapped!!
166 status = ia64_sal_pci_config_read(in1,in3,in2,&value);
167 r9 = value;
168 }
169 else
170 printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_READ\n");
171 break;
172 case SAL_PCI_CONFIG_WRITE:
173 if (current->domain == dom0) {
174 if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
175 (in4 > 1) ||
176 (in2 > 8) || (in2 & (in2-1)))
177 printk("*** SAL_PCI_CONF_WRITE?!?(adr=0x%lx,typ=0x%lx,sz=0x%lx,val=0x%lx)\n",
178 in1,in4,in2,in3);
179 // note that args are in a different order!!
180 status = ia64_sal_pci_config_write(in1,in4,in2,in3);
181 }
182 else
183 printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_WRITE\n");
184 break;
185 case SAL_SET_VECTORS:
186 if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
187 if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
188 /* Sanity check: cs_length1 must be 0,
189 second vector is reserved. */
190 status = -2;
191 }
192 else {
193 struct domain *d = current->domain;
194 d->arch.sal_data->boot_rdv_ip = in2;
195 d->arch.sal_data->boot_rdv_r1 = in3;
196 }
197 }
198 else
199 {
200 if (in1 > sizeof(sal_vectors)/sizeof(sal_vectors[0])-1)
201 BUG();
202 sal_vectors[in1].vector_type = in1;
203 sal_vectors[in1].handler_addr1 = in2;
204 sal_vectors[in1].gp1 = in3;
205 sal_vectors[in1].handler_len1 = in4;
206 sal_vectors[in1].handler_addr2 = in5;
207 sal_vectors[in1].gp2 = in6;
208 sal_vectors[in1].handler_len2 = in7;
209 }
210 break;
211 case SAL_GET_STATE_INFO:
212 if (current->domain == dom0) {
213 sal_queue_entry_t *e;
214 unsigned long flags;
215 struct smp_call_args_t arg;
217 spin_lock_irqsave(&sal_queue_lock, flags);
218 if (!sal_queue || list_empty(&sal_queue[in1])) {
219 sal_log_record_header_t header;
220 XEN_GUEST_HANDLE(void) handle =
221 *(XEN_GUEST_HANDLE(void)*)&in3;
223 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) "
224 "no sal_queue entry found.\n",
225 rec_name[in1]);
226 memset(&header, 0, sizeof(header));
228 if (copy_to_guest(handle, &header, 1)) {
229 printk("sal_emulator: "
230 "SAL_GET_STATE_INFO can't copy "
231 "empty header to user: 0x%lx\n",
232 in3);
233 }
234 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
235 r9 = 0;
236 spin_unlock_irqrestore(&sal_queue_lock, flags);
237 break;
238 }
239 e = list_entry(sal_queue[in1].next,
240 sal_queue_entry_t, list);
241 spin_unlock_irqrestore(&sal_queue_lock, flags);
243 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s <= %s) "
244 "on CPU#%d.\n",
245 rec_name[e->sal_info_type],
246 rec_name[in1], e->cpuid);
248 arg.type = e->sal_info_type;
249 arg.target = in3;
250 arg.corrected = !!((in1 != e->sal_info_type) &&
251 (e->sal_info_type == SAL_INFO_TYPE_MCA));
252 arg.domain = current->domain;
253 arg.status = 0;
255 if (e->cpuid == smp_processor_id()) {
256 IA64_SAL_DEBUG("SAL_GET_STATE_INFO: local\n");
257 get_state_info_on(&arg);
258 } else {
259 int ret;
260 IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
261 ret = smp_call_function_single(e->cpuid,
262 get_state_info_on,
263 &arg, 0, 1);
264 if (ret < 0) {
265 printk("SAL_GET_STATE_INFO "
266 "smp_call_function_single error:"
267 " %d\n", ret);
268 arg.ret = 0;
269 arg.status =
270 IA64_SAL_NO_INFORMATION_AVAILABLE;
271 }
272 }
273 r9 = arg.ret;
274 status = arg.status;
275 if (r9 == 0) {
276 spin_lock_irqsave(&sal_queue_lock, flags);
277 list_del(&e->list);
278 spin_unlock_irqrestore(&sal_queue_lock, flags);
279 xfree(e);
280 }
281 } else {
282 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
283 r9 = 0;
284 }
285 break;
286 case SAL_GET_STATE_INFO_SIZE:
287 r9 = ia64_sal_get_state_info_size(in1);
288 break;
289 case SAL_CLEAR_STATE_INFO:
290 if (current->domain == dom0) {
291 sal_queue_entry_t *e;
292 unsigned long flags;
293 struct smp_call_args_t arg;
295 spin_lock_irqsave(&sal_queue_lock, flags);
296 if (list_empty(&sal_queue[in1])) {
297 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) "
298 "no sal_queue entry found.\n",
299 rec_name[in1]);
300 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
301 r9 = 0;
302 spin_unlock_irqrestore(&sal_queue_lock, flags);
303 break;
304 }
305 e = list_entry(sal_queue[in1].next,
306 sal_queue_entry_t, list);
308 list_del(&e->list);
309 spin_unlock_irqrestore(&sal_queue_lock, flags);
311 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s <= %s) "
312 "on CPU#%d.\n",
313 rec_name[e->sal_info_type],
314 rec_name[in1], e->cpuid);
317 arg.type = e->sal_info_type;
318 arg.status = 0;
319 if (e->cpuid == smp_processor_id()) {
320 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: local\n");
321 clear_state_info_on(&arg);
322 } else {
323 int ret;
324 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: remote\n");
325 ret = smp_call_function_single(e->cpuid,
326 clear_state_info_on, &arg, 0, 1);
327 if (ret < 0) {
328 printk("sal_emulator: "
329 "SAL_CLEAR_STATE_INFO "
330 "smp_call_function_single error:"
331 " %d\n", ret);
332 arg.ret = 0;
333 arg.status =
334 IA64_SAL_NO_INFORMATION_AVAILABLE;
335 }
336 }
337 r9 = arg.ret;
338 status = arg.status;
339 xfree(e);
340 }
341 break;
342 case SAL_MC_RENDEZ:
343 printk("*** CALLED SAL_MC_RENDEZ. IGNORED...\n");
344 break;
345 case SAL_MC_SET_PARAMS:
346 if (in1 > sizeof(sal_mc_params)/sizeof(sal_mc_params[0]))
347 BUG();
348 sal_mc_params[in1].param_type = in1;
349 sal_mc_params[in1].i_or_m = in2;
350 sal_mc_params[in1].i_or_m_val = in3;
351 sal_mc_params[in1].timeout = in4;
352 sal_mc_params[in1].rz_always = in5;
353 break;
354 case SAL_CACHE_FLUSH:
355 if (1) {
356 /* Flush using SAL.
357 This method is faster but has a side effect on
358 other vcpu running on this cpu. */
359 status = ia64_sal_cache_flush (in1);
360 }
361 else {
362 /* Flush with fc all the domain.
363 This method is slower but has no side effects. */
364 domain_cache_flush (current->domain, in1 == 4 ? 1 : 0);
365 status = 0;
366 }
367 break;
368 case SAL_CACHE_INIT:
369 printk("*** CALLED SAL_CACHE_INIT. IGNORED...\n");
370 break;
371 case SAL_UPDATE_PAL:
372 printk("*** CALLED SAL_UPDATE_PAL. IGNORED...\n");
373 break;
374 case SAL_XEN_SAL_RETURN:
375 if (!test_and_set_bit(_VCPUF_down, &current->vcpu_flags))
376 vcpu_sleep_nosync(current);
377 break;
378 default:
379 printk("*** CALLED SAL_ WITH UNKNOWN INDEX. IGNORED...\n");
380 status = -1;
381 break;
382 }
383 return ((struct sal_ret_values) {status, r9, r10, r11});
384 }
386 cpumask_t cpu_cache_coherent_map;
388 struct cache_flush_args {
389 u64 cache_type;
390 u64 operation;
391 u64 progress;
392 long status;
393 };
395 static void
396 remote_pal_cache_flush(void *v)
397 {
398 struct cache_flush_args *args = v;
399 long status;
400 u64 progress = args->progress;
402 status = ia64_pal_cache_flush(args->cache_type, args->operation,
403 &progress, NULL);
404 if (status != 0)
405 args->status = status;
406 }
408 struct ia64_pal_retval
409 xen_pal_emulator(unsigned long index, u64 in1, u64 in2, u64 in3)
410 {
411 unsigned long r9 = 0;
412 unsigned long r10 = 0;
413 unsigned long r11 = 0;
414 long status = PAL_STATUS_UNIMPLEMENTED;
416 if (running_on_sim)
417 return pal_emulator_static(index);
419 // pal code must be mapped by a TR when pal is called, however
420 // calls are rare enough that we will map it lazily rather than
421 // at every context switch
422 //efi_map_pal_code();
423 switch (index) {
424 case PAL_MEM_ATTRIB:
425 status = ia64_pal_mem_attrib(&r9);
426 break;
427 case PAL_FREQ_BASE:
428 status = ia64_pal_freq_base(&r9);
429 if (status == PAL_STATUS_UNIMPLEMENTED) {
430 status = ia64_sal_freq_base(0, &r9, &r10);
431 r10 = 0;
432 }
433 break;
434 case PAL_PROC_GET_FEATURES:
435 status = ia64_pal_proc_get_features(&r9,&r10,&r11);
436 break;
437 case PAL_BUS_GET_FEATURES:
438 status = ia64_pal_bus_get_features(
439 (pal_bus_features_u_t *) &r9,
440 (pal_bus_features_u_t *) &r10,
441 (pal_bus_features_u_t *) &r11);
442 break;
443 case PAL_FREQ_RATIOS:
444 status = ia64_pal_freq_ratios(
445 (struct pal_freq_ratio *) &r9,
446 (struct pal_freq_ratio *) &r10,
447 (struct pal_freq_ratio *) &r11);
448 break;
449 case PAL_PTCE_INFO:
450 {
451 // return hard-coded xen-specific values because ptc.e
452 // is emulated on xen to always flush everything
453 // these values result in only one ptc.e instruction
454 status = 0; r9 = 0; r10 = (1L << 32) | 1L; r11 = 0;
455 }
456 break;
457 case PAL_VERSION:
458 status = ia64_pal_version(
459 (pal_version_u_t *) &r9,
460 (pal_version_u_t *) &r10);
461 break;
462 case PAL_VM_PAGE_SIZE:
463 status = ia64_pal_vm_page_size(&r9,&r10);
464 break;
465 case PAL_DEBUG_INFO:
466 status = ia64_pal_debug_info(&r9,&r10);
467 break;
468 case PAL_CACHE_SUMMARY:
469 status = ia64_pal_cache_summary(&r9,&r10);
470 break;
471 case PAL_VM_SUMMARY:
472 if (VMX_DOMAIN(current)) {
473 pal_vm_info_1_u_t v1;
474 pal_vm_info_2_u_t v2;
475 status = ia64_pal_vm_summary((pal_vm_info_1_u_t *)&v1,
476 (pal_vm_info_2_u_t *)&v2);
477 v1.pal_vm_info_1_s.max_itr_entry = NITRS - 1;
478 v1.pal_vm_info_1_s.max_dtr_entry = NDTRS - 1;
479 v2.pal_vm_info_2_s.impl_va_msb -= 1;
480 v2.pal_vm_info_2_s.rid_size =
481 current->domain->arch.rid_bits;
482 r9 = v1.pvi1_val;
483 r10 = v2.pvi2_val;
484 } else {
485 /* Use xen-specific values.
486 hash_tag_id is somewhat random! */
487 static const pal_vm_info_1_u_t v1 =
488 {.pal_vm_info_1_s =
489 { .vw = 1,
490 .phys_add_size = 44,
491 .key_size = 16,
492 .max_pkr = 15,
493 .hash_tag_id = 0x30,
494 .max_dtr_entry = NDTRS - 1,
495 .max_itr_entry = NITRS - 1,
496 #ifdef VHPT_GLOBAL
497 .max_unique_tcs = 3,
498 .num_tc_levels = 2
499 #else
500 .max_unique_tcs = 2,
501 .num_tc_levels = 1
502 #endif
503 }};
504 pal_vm_info_2_u_t v2;
505 v2.pvi2_val = 0;
506 v2.pal_vm_info_2_s.rid_size =
507 current->domain->arch.rid_bits;
508 v2.pal_vm_info_2_s.impl_va_msb = 50;
509 r9 = v1.pvi1_val;
510 r10 = v2.pvi2_val;
511 status = PAL_STATUS_SUCCESS;
512 }
513 break;
514 case PAL_VM_INFO:
515 if (VMX_DOMAIN(current)) {
516 status = ia64_pal_vm_info(in1, in2,
517 (pal_tc_info_u_t *)&r9, &r10);
518 break;
519 }
520 #ifdef VHPT_GLOBAL
521 if (in1 == 0 && in2 == 2) {
522 /* Level 1: VHPT */
523 const pal_tc_info_u_t v =
524 { .pal_tc_info_s = {.num_sets = 128,
525 .associativity = 1,
526 .num_entries = 128,
527 .pf = 1,
528 .unified = 1,
529 .reduce_tr = 0,
530 .reserved = 0}};
531 r9 = v.pti_val;
532 /* Only support PAGE_SIZE tc. */
533 r10 = PAGE_SIZE;
534 status = PAL_STATUS_SUCCESS;
535 }
536 #endif
537 else if (
538 #ifdef VHPT_GLOBAL
539 in1 == 1 /* Level 2. */
540 #else
541 in1 == 0 /* Level 1. */
542 #endif
543 && (in2 == 1 || in2 == 2))
544 {
545 /* itlb/dtlb, 1 entry. */
546 const pal_tc_info_u_t v =
547 { .pal_tc_info_s = {.num_sets = 1,
548 .associativity = 1,
549 .num_entries = 1,
550 .pf = 1,
551 .unified = 0,
552 .reduce_tr = 0,
553 .reserved = 0}};
554 r9 = v.pti_val;
555 /* Only support PAGE_SIZE tc. */
556 r10 = PAGE_SIZE;
557 status = PAL_STATUS_SUCCESS;
558 }
559 else
560 status = PAL_STATUS_EINVAL;
561 break;
562 case PAL_RSE_INFO:
563 status = ia64_pal_rse_info(
564 &r9,
565 (pal_hints_u_t *) &r10);
566 break;
567 case PAL_REGISTER_INFO:
568 status = ia64_pal_register_info(in1, &r9, &r10);
569 break;
570 case PAL_CACHE_FLUSH:
571 if (in3 != 0) /* Initially progress_indicator must be 0 */
572 panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
573 "progress_indicator=%lx", in3);
575 /* Always call Host Pal in int=0 */
576 in2 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
578 if (in1 != PAL_CACHE_TYPE_COHERENT) {
579 struct cache_flush_args args = {
580 .cache_type = in1,
581 .operation = in2,
582 .progress = 0,
583 .status = 0
584 };
585 smp_call_function(remote_pal_cache_flush,
586 (void *)&args, 1, 1);
587 if (args.status != 0)
588 panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
589 "remote status %lx", args.status);
590 }
592 /*
593 * Call Host PAL cache flush
594 * Clear psr.ic when call PAL_CACHE_FLUSH
595 */
596 r10 = in3;
597 status = ia64_pal_cache_flush(in1, in2, &r10, &r9);
599 if (status != 0)
600 panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
601 "status %lx", status);
603 if (in1 == PAL_CACHE_TYPE_COHERENT) {
604 int cpu = current->processor;
605 cpus_setall(current->arch.cache_coherent_map);
606 cpu_clear(cpu, current->arch.cache_coherent_map);
607 cpus_setall(cpu_cache_coherent_map);
608 cpu_clear(cpu, cpu_cache_coherent_map);
609 }
610 break;
611 case PAL_PERF_MON_INFO:
612 {
613 unsigned long pm_buffer[16];
614 status = ia64_pal_perf_mon_info(
615 pm_buffer,
616 (pal_perf_mon_info_u_t *) &r9);
617 if (status != 0) {
618 while(1)
619 printk("PAL_PERF_MON_INFO fails ret=%ld\n", status);
620 break;
621 }
622 if (copy_to_user((void __user *)in1,pm_buffer,128)) {
623 while(1)
624 printk("xen_pal_emulator: PAL_PERF_MON_INFO "
625 "can't copy to user!!!!\n");
626 status = PAL_STATUS_UNIMPLEMENTED;
627 break;
628 }
629 }
630 break;
631 case PAL_CACHE_INFO:
632 {
633 pal_cache_config_info_t ci;
634 status = ia64_pal_cache_config_info(in1,in2,&ci);
635 if (status != 0) break;
636 r9 = ci.pcci_info_1.pcci1_data;
637 r10 = ci.pcci_info_2.pcci2_data;
638 }
639 break;
640 case PAL_VM_TR_READ: /* FIXME: vcpu_get_tr?? */
641 printk("PAL_VM_TR_READ NOT IMPLEMENTED, IGNORED!\n");
642 break;
643 case PAL_HALT_INFO:
644 {
645 /* 1000 cycles to enter/leave low power state,
646 consumes 10 mW, implemented and cache/TLB coherent. */
647 unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
648 | (1UL << 61) | (1UL << 60);
649 if (copy_to_user ((void *)in1, &res, sizeof (res)))
650 status = PAL_STATUS_EINVAL;
651 else
652 status = PAL_STATUS_SUCCESS;
653 }
654 break;
655 case PAL_HALT:
656 if (current->domain == dom0) {
657 printk ("Domain0 halts the machine\n");
658 console_start_sync();
659 (*efi.reset_system)(EFI_RESET_SHUTDOWN,0,0,NULL);
660 } else {
661 set_bit(_VCPUF_down, &current->vcpu_flags);
662 vcpu_sleep_nosync(current);
663 status = PAL_STATUS_SUCCESS;
664 }
665 break;
666 case PAL_HALT_LIGHT:
667 if (VMX_DOMAIN(current)) {
668 /* Called by VTI. */
669 if (!is_unmasked_irq(current)) {
670 do_sched_op_compat(SCHEDOP_block, 0);
671 do_softirq();
672 }
673 status = PAL_STATUS_SUCCESS;
674 }
675 break;
676 case PAL_PLATFORM_ADDR:
677 if (VMX_DOMAIN(current))
678 status = PAL_STATUS_SUCCESS;
679 break;
680 case PAL_LOGICAL_TO_PHYSICAL:
681 /* Optional, no need to complain about being unimplemented */
682 break;
683 default:
684 printk("xen_pal_emulator: UNIMPLEMENTED PAL CALL %lu!!!!\n",
685 index);
686 break;
687 }
688 return ((struct ia64_pal_retval) {status, r9, r10, r11});
689 }
691 // given a current domain (virtual or metaphysical) address, return the virtual address
692 static unsigned long
693 efi_translate_domain_addr(unsigned long domain_addr, IA64FAULT *fault,
694 struct page_info** page)
695 {
696 struct vcpu *v = current;
697 unsigned long mpaddr = domain_addr;
698 unsigned long virt;
699 *fault = IA64_NO_FAULT;
701 again:
702 if (v->domain->arch.sal_data->efi_virt_mode) {
703 *fault = vcpu_tpa(v, domain_addr, &mpaddr);
704 if (*fault != IA64_NO_FAULT) return 0;
705 }
707 virt = (unsigned long)domain_mpa_to_imva(v->domain, mpaddr);
708 *page = virt_to_page(virt);
709 if (get_page(*page, current->domain) == 0) {
710 if (page_get_owner(*page) != current->domain) {
711 // which code is appropriate?
712 *fault = IA64_FAULT;
713 return 0;
714 }
715 goto again;
716 }
718 return virt;
719 }
721 static efi_status_t
722 efi_emulate_get_time(
723 unsigned long tv_addr, unsigned long tc_addr,
724 IA64FAULT *fault)
725 {
726 unsigned long tv, tc = 0;
727 struct page_info *tv_page = NULL;
728 struct page_info *tc_page = NULL;
729 efi_status_t status = 0;
730 efi_time_t *tvp;
731 struct tm timeptr;
732 unsigned long xtimesec;
734 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
735 if (*fault != IA64_NO_FAULT)
736 goto errout;
737 if (tc_addr) {
738 tc = efi_translate_domain_addr(tc_addr, fault, &tc_page);
739 if (*fault != IA64_NO_FAULT)
740 goto errout;
741 }
743 spin_lock(&efi_time_services_lock);
744 status = (*efi.get_time)((efi_time_t *) tv, (efi_time_cap_t *) tc);
745 tvp = (efi_time_t *)tv;
746 xtimesec = mktime(tvp->year, tvp->month, tvp->day, tvp->hour,
747 tvp->minute, tvp->second);
748 xtimesec += current->domain->time_offset_seconds;
749 timeptr = gmtime(xtimesec);
750 tvp->second = timeptr.tm_sec;
751 tvp->minute = timeptr.tm_min;
752 tvp->hour = timeptr.tm_hour;
753 tvp->day = timeptr.tm_mday;
754 tvp->month = timeptr.tm_mon + 1;
755 tvp->year = timeptr.tm_year + 1900;
756 spin_unlock(&efi_time_services_lock);
758 errout:
759 if (tc_page != NULL)
760 put_page(tc_page);
761 if (tv_page != NULL)
762 put_page(tv_page);
764 return status;
765 }
767 static efi_status_t
768 efi_emulate_set_time(
769 unsigned long tv_addr, IA64FAULT *fault)
770 {
771 unsigned long tv;
772 struct page_info *tv_page = NULL;
773 efi_status_t status = 0;
775 if (current->domain != dom0)
776 return EFI_UNSUPPORTED;
778 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
779 if (*fault != IA64_NO_FAULT)
780 goto errout;
782 spin_lock(&efi_time_services_lock);
783 status = (*efi.set_time)((efi_time_t *)tv);
784 spin_unlock(&efi_time_services_lock);
786 errout:
787 if (tv_page != NULL)
788 put_page(tv_page);
790 return status;
791 }
793 static efi_status_t
794 efi_emulate_get_wakeup_time(
795 unsigned long e_addr, unsigned long p_addr,
796 unsigned long tv_addr, IA64FAULT *fault)
797 {
798 unsigned long enabled, pending, tv;
799 struct page_info *e_page = NULL, *p_page = NULL,
800 *tv_page = NULL;
801 efi_status_t status = 0;
803 if (current->domain != dom0)
804 return EFI_UNSUPPORTED;
806 if (!e_addr || !p_addr || !tv_addr)
807 return EFI_INVALID_PARAMETER;
809 enabled = efi_translate_domain_addr(e_addr, fault, &e_page);
810 if (*fault != IA64_NO_FAULT)
811 goto errout;
812 pending = efi_translate_domain_addr(p_addr, fault, &p_page);
813 if (*fault != IA64_NO_FAULT)
814 goto errout;
815 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
816 if (*fault != IA64_NO_FAULT)
817 goto errout;
819 spin_lock(&efi_time_services_lock);
820 status = (*efi.get_wakeup_time)((efi_bool_t *)enabled,
821 (efi_bool_t *)pending,
822 (efi_time_t *)tv);
823 spin_unlock(&efi_time_services_lock);
825 errout:
826 if (e_page != NULL)
827 put_page(e_page);
828 if (p_page != NULL)
829 put_page(p_page);
830 if (tv_page != NULL)
831 put_page(tv_page);
833 return status;
834 }
836 static efi_status_t
837 efi_emulate_set_wakeup_time(
838 unsigned long enabled, unsigned long tv_addr,
839 IA64FAULT *fault)
840 {
841 unsigned long tv = 0;
842 struct page_info *tv_page = NULL;
843 efi_status_t status = 0;
845 if (current->domain != dom0)
846 return EFI_UNSUPPORTED;
848 if (tv_addr) {
849 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
850 if (*fault != IA64_NO_FAULT)
851 goto errout;
852 }
854 spin_lock(&efi_time_services_lock);
855 status = (*efi.set_wakeup_time)((efi_bool_t)enabled,
856 (efi_time_t *)tv);
857 spin_unlock(&efi_time_services_lock);
859 errout:
860 if (tv_page != NULL)
861 put_page(tv_page);
863 return status;
864 }
866 static efi_status_t
867 efi_emulate_get_variable(
868 unsigned long name_addr, unsigned long vendor_addr,
869 unsigned long attr_addr, unsigned long data_size_addr,
870 unsigned long data_addr, IA64FAULT *fault)
871 {
872 unsigned long name, vendor, attr = 0, data_size, data;
873 struct page_info *name_page = NULL, *vendor_page = NULL,
874 *attr_page = NULL, *data_size_page = NULL,
875 *data_page = NULL;
876 efi_status_t status = 0;
878 if (current->domain != dom0)
879 return EFI_UNSUPPORTED;
881 name = efi_translate_domain_addr(name_addr, fault, &name_page);
882 if (*fault != IA64_NO_FAULT)
883 goto errout;
884 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
885 if (*fault != IA64_NO_FAULT)
886 goto errout;
887 data_size = efi_translate_domain_addr(data_size_addr, fault,
888 &data_size_page);
889 if (*fault != IA64_NO_FAULT)
890 goto errout;
891 data = efi_translate_domain_addr(data_addr, fault, &data_page);
892 if (*fault != IA64_NO_FAULT)
893 goto errout;
894 if (attr_addr) {
895 attr = efi_translate_domain_addr(attr_addr, fault, &attr_page);
896 if (*fault != IA64_NO_FAULT)
897 goto errout;
898 }
900 status = (*efi.get_variable)((efi_char16_t *)name,
901 (efi_guid_t *)vendor,
902 (u32 *)attr,
903 (unsigned long *)data_size,
904 (void *)data);
906 errout:
907 if (name_page != NULL)
908 put_page(name_page);
909 if (vendor_page != NULL)
910 put_page(vendor_page);
911 if (attr_page != NULL)
912 put_page(attr_page);
913 if (data_size_page != NULL)
914 put_page(data_size_page);
915 if (data_page != NULL)
916 put_page(data_page);
918 return status;
919 }
921 static efi_status_t
922 efi_emulate_get_next_variable(
923 unsigned long name_size_addr, unsigned long name_addr,
924 unsigned long vendor_addr, IA64FAULT *fault)
925 {
926 unsigned long name_size, name, vendor;
927 struct page_info *name_size_page = NULL, *name_page = NULL,
928 *vendor_page = NULL;
929 efi_status_t status = 0;
931 if (current->domain != dom0)
932 return EFI_UNSUPPORTED;
934 name_size = efi_translate_domain_addr(name_size_addr, fault,
935 &name_size_page);
936 if (*fault != IA64_NO_FAULT)
937 goto errout;
938 name = efi_translate_domain_addr(name_addr, fault, &name_page);
939 if (*fault != IA64_NO_FAULT)
940 goto errout;
941 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
942 if (*fault != IA64_NO_FAULT)
943 goto errout;
945 status = (*efi.get_next_variable)((unsigned long *)name_size,
946 (efi_char16_t *)name,
947 (efi_guid_t *)vendor);
949 errout:
950 if (name_size_page != NULL)
951 put_page(name_size_page);
952 if (name_page != NULL)
953 put_page(name_page);
954 if (vendor_page != NULL)
955 put_page(vendor_page);
957 return status;
958 }
960 static efi_status_t
961 efi_emulate_set_variable(
962 unsigned long name_addr, unsigned long vendor_addr,
963 unsigned long attr, unsigned long data_size,
964 unsigned long data_addr, IA64FAULT *fault)
965 {
966 unsigned long name, vendor, data;
967 struct page_info *name_page = NULL, *vendor_page = NULL,
968 *data_page = NULL;
969 efi_status_t status = 0;
971 if (current->domain != dom0)
972 return EFI_UNSUPPORTED;
974 name = efi_translate_domain_addr(name_addr, fault, &name_page);
975 if (*fault != IA64_NO_FAULT)
976 goto errout;
977 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
978 if (*fault != IA64_NO_FAULT)
979 goto errout;
980 data = efi_translate_domain_addr(data_addr, fault, &data_page);
981 if (*fault != IA64_NO_FAULT)
982 goto errout;
984 status = (*efi.set_variable)((efi_char16_t *)name,
985 (efi_guid_t *)vendor,
986 attr,
987 data_size,
988 (void *)data);
990 errout:
991 if (name_page != NULL)
992 put_page(name_page);
993 if (vendor_page != NULL)
994 put_page(vendor_page);
995 if (data_page != NULL)
996 put_page(data_page);
998 return status;
999 }
1001 static efi_status_t
1002 efi_emulate_set_virtual_address_map(
1003 unsigned long memory_map_size, unsigned long descriptor_size,
1004 u32 descriptor_version, efi_memory_desc_t *virtual_map)
1006 void *efi_map_start, *efi_map_end, *p;
1007 efi_memory_desc_t entry, *md = &entry;
1008 u64 efi_desc_size;
1010 unsigned long *vfn;
1011 struct domain *d = current->domain;
1012 efi_runtime_services_t *efi_runtime = d->arch.efi_runtime;
1013 fpswa_interface_t *fpswa_inf = d->arch.fpswa_inf;
1015 if (descriptor_version != EFI_MEMDESC_VERSION) {
1016 printk ("efi_emulate_set_virtual_address_map: memory "
1017 "descriptor version unmatched (%d vs %d)\n",
1018 (int)descriptor_version, EFI_MEMDESC_VERSION);
1019 return EFI_INVALID_PARAMETER;
1022 if (descriptor_size != sizeof(efi_memory_desc_t)) {
1023 printk ("efi_emulate_set_virtual_address_map: memory descriptor size unmatched\n");
1024 return EFI_INVALID_PARAMETER;
1027 if (d->arch.sal_data->efi_virt_mode)
1028 return EFI_UNSUPPORTED;
1030 efi_map_start = virtual_map;
1031 efi_map_end = efi_map_start + memory_map_size;
1032 efi_desc_size = sizeof(efi_memory_desc_t);
1034 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
1035 if (copy_from_user(&entry, p, sizeof(efi_memory_desc_t))) {
1036 printk ("efi_emulate_set_virtual_address_map: copy_from_user() fault. addr=0x%p\n", p);
1037 return EFI_UNSUPPORTED;
1040 /* skip over non-PAL_CODE memory descriptors; EFI_RUNTIME is included in PAL_CODE. */
1041 if (md->type != EFI_PAL_CODE)
1042 continue;
1044 #define EFI_HYPERCALL_PATCH_TO_VIRT(tgt,call) \
1045 do { \
1046 vfn = (unsigned long *) domain_mpa_to_imva(d, tgt); \
1047 *vfn++ = FW_HYPERCALL_##call##_INDEX * 16UL + md->virt_addr; \
1048 *vfn++ = 0; \
1049 } while (0)
1051 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_time,EFI_GET_TIME);
1052 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_time,EFI_SET_TIME);
1053 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_wakeup_time,EFI_GET_WAKEUP_TIME);
1054 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_wakeup_time,EFI_SET_WAKEUP_TIME);
1055 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_virtual_address_map,EFI_SET_VIRTUAL_ADDRESS_MAP);
1056 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_variable,EFI_GET_VARIABLE);
1057 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_variable,EFI_GET_NEXT_VARIABLE);
1058 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_variable,EFI_SET_VARIABLE);
1059 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_high_mono_count,EFI_GET_NEXT_HIGH_MONO_COUNT);
1060 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->reset_system,EFI_RESET_SYSTEM);
1062 vfn = (unsigned long *) domain_mpa_to_imva(d, (unsigned long) fpswa_inf->fpswa);
1063 *vfn++ = FW_HYPERCALL_FPSWA_PATCH_INDEX * 16UL + md->virt_addr;
1064 *vfn = 0;
1065 fpswa_inf->fpswa = (void *) (FW_HYPERCALL_FPSWA_ENTRY_INDEX * 16UL + md->virt_addr);
1066 break;
1069 /* The virtual address map has been applied. */
1070 d->arch.sal_data->efi_virt_mode = 1;
1072 return EFI_SUCCESS;
1075 efi_status_t
1076 efi_emulator (struct pt_regs *regs, IA64FAULT *fault)
1078 struct vcpu *v = current;
1079 efi_status_t status;
1081 *fault = IA64_NO_FAULT;
1083 switch (regs->r2) {
1084 case FW_HYPERCALL_EFI_RESET_SYSTEM:
1086 u8 reason;
1087 unsigned long val = vcpu_get_gr(v,32);
1088 switch (val)
1090 case EFI_RESET_SHUTDOWN:
1091 reason = SHUTDOWN_poweroff;
1092 break;
1093 case EFI_RESET_COLD:
1094 case EFI_RESET_WARM:
1095 default:
1096 reason = SHUTDOWN_reboot;
1097 break;
1099 domain_shutdown (current->domain, reason);
1101 status = EFI_UNSUPPORTED;
1102 break;
1103 case FW_HYPERCALL_EFI_GET_TIME:
1104 status = efi_emulate_get_time (
1105 vcpu_get_gr(v,32),
1106 vcpu_get_gr(v,33),
1107 fault);
1108 break;
1109 case FW_HYPERCALL_EFI_SET_TIME:
1110 status = efi_emulate_set_time (
1111 vcpu_get_gr(v,32),
1112 fault);
1113 break;
1114 case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
1115 status = efi_emulate_get_wakeup_time (
1116 vcpu_get_gr(v,32),
1117 vcpu_get_gr(v,33),
1118 vcpu_get_gr(v,34),
1119 fault);
1120 break;
1121 case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
1122 status = efi_emulate_set_wakeup_time (
1123 vcpu_get_gr(v,32),
1124 vcpu_get_gr(v,33),
1125 fault);
1126 break;
1127 case FW_HYPERCALL_EFI_GET_VARIABLE:
1128 status = efi_emulate_get_variable (
1129 vcpu_get_gr(v,32),
1130 vcpu_get_gr(v,33),
1131 vcpu_get_gr(v,34),
1132 vcpu_get_gr(v,35),
1133 vcpu_get_gr(v,36),
1134 fault);
1135 break;
1136 case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
1137 status = efi_emulate_get_next_variable (
1138 vcpu_get_gr(v,32),
1139 vcpu_get_gr(v,33),
1140 vcpu_get_gr(v,34),
1141 fault);
1142 break;
1143 case FW_HYPERCALL_EFI_SET_VARIABLE:
1144 status = efi_emulate_set_variable (
1145 vcpu_get_gr(v,32),
1146 vcpu_get_gr(v,33),
1147 vcpu_get_gr(v,34),
1148 vcpu_get_gr(v,35),
1149 vcpu_get_gr(v,36),
1150 fault);
1151 break;
1152 case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
1153 status = efi_emulate_set_virtual_address_map (
1154 vcpu_get_gr(v,32),
1155 vcpu_get_gr(v,33),
1156 (u32) vcpu_get_gr(v,34),
1157 (efi_memory_desc_t *) vcpu_get_gr(v,35));
1158 break;
1159 case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
1160 // FIXME: need fixes in efi.h from 2.6.9
1161 status = EFI_UNSUPPORTED;
1162 break;
1163 default:
1164 printk("unknown ia64 fw hypercall %lx\n", regs->r2);
1165 status = EFI_UNSUPPORTED;
1168 return status;
1171 void
1172 do_ssc(unsigned long ssc, struct pt_regs *regs)
1174 unsigned long arg0, arg1, arg2, arg3, retval;
1175 char buf[2];
1176 /**/ static int last_fd, last_count; // FIXME FIXME FIXME
1177 /**/ // BROKEN FOR MULTIPLE DOMAINS & SMP
1178 /**/ struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
1180 arg0 = vcpu_get_gr(current,32);
1181 switch(ssc) {
1182 case SSC_PUTCHAR:
1183 buf[0] = arg0;
1184 buf[1] = '\0';
1185 printk(buf);
1186 break;
1187 case SSC_GETCHAR:
1188 retval = ia64_ssc(0,0,0,0,ssc);
1189 vcpu_set_gr(current,8,retval,0);
1190 break;
1191 case SSC_WAIT_COMPLETION:
1192 if (arg0) { // metaphysical address
1194 arg0 = translate_domain_mpaddr(arg0, NULL);
1195 /**/ stat = (struct ssc_disk_stat *)__va(arg0);
1196 ///**/ if (stat->fd == last_fd) stat->count = last_count;
1197 /**/ stat->count = last_count;
1198 //if (last_count >= PAGE_SIZE) printk("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
1199 ///**/ retval = ia64_ssc(arg0,0,0,0,ssc);
1200 /**/ retval = 0;
1202 else retval = -1L;
1203 vcpu_set_gr(current,8,retval,0);
1204 break;
1205 case SSC_OPEN:
1206 arg1 = vcpu_get_gr(current,33); // access rights
1207 if (!running_on_sim) { printk("SSC_OPEN, not implemented on hardware. (ignoring...)\n"); arg0 = 0; }
1208 if (arg0) { // metaphysical address
1209 arg0 = translate_domain_mpaddr(arg0, NULL);
1210 retval = ia64_ssc(arg0,arg1,0,0,ssc);
1212 else retval = -1L;
1213 vcpu_set_gr(current,8,retval,0);
1214 break;
1215 case SSC_WRITE:
1216 case SSC_READ:
1217 //if (ssc == SSC_WRITE) printk("DOING AN SSC_WRITE\n");
1218 arg1 = vcpu_get_gr(current,33);
1219 arg2 = vcpu_get_gr(current,34);
1220 arg3 = vcpu_get_gr(current,35);
1221 if (arg2) { // metaphysical address of descriptor
1222 struct ssc_disk_req *req;
1223 unsigned long mpaddr;
1224 long len;
1226 arg2 = translate_domain_mpaddr(arg2, NULL);
1227 req = (struct ssc_disk_req *) __va(arg2);
1228 req->len &= 0xffffffffL; // avoid strange bug
1229 len = req->len;
1230 /**/ last_fd = arg1;
1231 /**/ last_count = len;
1232 mpaddr = req->addr;
1233 //if (last_count >= PAGE_SIZE) printk("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
1234 retval = 0;
1235 if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
1236 // do partial page first
1237 req->addr = translate_domain_mpaddr(mpaddr, NULL);
1238 req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
1239 len -= req->len; mpaddr += req->len;
1240 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1241 arg3 += req->len; // file offset
1242 /**/ last_stat.fd = last_fd;
1243 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
1244 //if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
1246 if (retval >= 0) while (len > 0) {
1247 req->addr = translate_domain_mpaddr(mpaddr, NULL);
1248 req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
1249 len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
1250 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1251 arg3 += req->len; // file offset
1252 // TEMP REMOVED AGAIN arg3 += req->len; // file offset
1253 /**/ last_stat.fd = last_fd;
1254 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
1255 //if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
1257 // set it back to the original value
1258 req->len = last_count;
1260 else retval = -1L;
1261 vcpu_set_gr(current,8,retval,0);
1262 //if (last_count >= PAGE_SIZE) printk("retval=%x\n",retval);
1263 break;
1264 case SSC_CONNECT_INTERRUPT:
1265 arg1 = vcpu_get_gr(current,33);
1266 arg2 = vcpu_get_gr(current,34);
1267 arg3 = vcpu_get_gr(current,35);
1268 if (!running_on_sim) { printk("SSC_CONNECT_INTERRUPT, not implemented on hardware. (ignoring...)\n"); break; }
1269 (void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1270 break;
1271 case SSC_NETDEV_PROBE:
1272 vcpu_set_gr(current,8,-1L,0);
1273 break;
1274 default:
1275 panic_domain(regs,
1276 "%s: bad ssc code %lx, iip=0x%lx, b0=0x%lx\n",
1277 __func__, ssc, regs->cr_iip, regs->b0);
1278 break;
1280 vcpu_increment_iip(current);