ia64/xen-unstable

view xen/arch/ia64/xen/fw_emul.c @ 13905:2b3dd681dbce

[IA64] Fix I&D cache incoherency after vcpu migration

Windows on HVM occasionally crashes with a BSOD, especially at boot time.
I finally found out the cause is PAL_CACHE_FLUSH(cache_type=4).
The cache_type means an argument of PAL_CACHE_FLUSH and cache_type=4
makes local instruction caches coherent with the data caches.
See SDM vol2 11.10.3, PAL_CACHE_FLUSH.
FYI, Linux never uses cache_type=4.

Currently PAL_CACHE_FLUSH is called on only local cpu and caches on the
other cpus are still incoherent.

Attached patch does:
- When cache_type=1,2,3 — which means flushing caches on the local cpu —
caches on the other cpus are now flushed as well.
It might be overkill and not efficient. But I think it's permissive
since these cache_type are seldom used.

- When cache_type=4, the actual PAL call to the other cpus is deferred
until the vcpu migration occurs or the cpu becomes idle.
This is because Windows uses cache_type=4 quite often and, in an SMP
environment, many vcpus call PAL_CACHE_FLUSH simultaneously.

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
author awilliam@xenbuild2.aw
date Thu Feb 15 10:25:33 2007 -0700 (2007-02-15)
parents 5982d478698f
children b0aeca575dfb
line source
1 /*
2 * fw_emul.c:
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 */
18 #include <xen/config.h>
19 #include <asm/system.h>
20 #include <asm/pgalloc.h>
22 #include <linux/efi.h>
23 #include <asm/pal.h>
24 #include <asm/sal.h>
25 #include <asm/xenmca.h>
27 #include <public/sched.h>
28 #include "hpsim_ssc.h"
29 #include <asm/vcpu.h>
30 #include <asm/vmx_vcpu.h>
31 #include <asm/dom_fw.h>
32 #include <asm/uaccess.h>
33 #include <xen/console.h>
34 #include <xen/hypercall.h>
35 #include <xen/softirq.h>
36 #include <xen/time.h>
/* Serializes the emulated EFI time services (get/set time, wakeup time). */
static DEFINE_SPINLOCK(efi_time_services_lock);

/* Nonzero when running on the HP simulator (no real SAL/PAL firmware). */
extern unsigned long running_on_sim;

/* Last arguments a guest passed to SAL_MC_SET_PARAMS, indexed by
   parameter type; recorded but otherwise unused here. */
struct sal_mc_params {
	u64 param_type;
	u64 i_or_m;
	u64 i_or_m_val;
	u64 timeout;
	u64 rz_always;
} sal_mc_params[SAL_MC_PARAM_CPE_INT + 1];

/* Last arguments a guest passed to SAL_SET_VECTORS, indexed by vector
   type (the boot-rendezvous vector is handled separately, per-domain). */
struct sal_vectors {
	u64 vector_type;
	u64 handler_addr1;
	u64 gp1;
	u64 handler_len1;
	u64 handler_addr2;
	u64 gp2;
	u64 handler_len2;
} sal_vectors[SAL_VECTOR_OS_BOOT_RENDEZ + 1];

/* Request/reply marshalled to the get/clear_state_info_on callbacks
   run via smp_call_function_single(). */
struct smp_call_args_t {
	u64 type;              /* SAL info type (MCA/INIT/CMC/CPE) */
	u64 ret;               /* bytes returned by SAL, 0 on failure */
	u64 target;            /* guest address to copy the record to */
	struct domain *domain; /* domain issuing the request */
	int corrected;         /* force severity to 'corrected'? */
	int status;            /* SAL status code handed back to the guest */
	void *data;            /* unused in this file -- TODO confirm */
};

/* Shared buffer holding one SAL error record; guarded by sal_record_lock. */
extern sal_log_record_header_t *sal_record;
DEFINE_SPINLOCK(sal_record_lock);

/* Guards the per-type queues of pending SAL error records. */
extern spinlock_t sal_queue_lock;

/* SAL status: no error record of the requested type is available. */
#define IA64_SAL_NO_INFORMATION_AVAILABLE -5

#if defined(IA64_SAL_DEBUG_INFO)
static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };

# define IA64_SAL_DEBUG(fmt...) printk("sal_emulator: " fmt)
#else
# define IA64_SAL_DEBUG(fmt...)
#endif
85 void get_state_info_on(void *data) {
86 struct smp_call_args_t *arg = data;
87 int flags;
89 spin_lock_irqsave(&sal_record_lock, flags);
90 memset(sal_record, 0, ia64_sal_get_state_info_size(arg->type));
91 arg->ret = ia64_sal_get_state_info(arg->type, (u64 *)sal_record);
92 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) on CPU#%d returns %ld.\n",
93 rec_name[arg->type], smp_processor_id(), arg->ret);
94 if (arg->corrected) {
95 sal_record->severity = sal_log_severity_corrected;
96 IA64_SAL_DEBUG("%s: IA64_SAL_CLEAR_STATE_INFO(SAL_INFO_TYPE_MCA)"
97 " force\n", __FUNCTION__);
98 }
99 if (arg->ret > 0) {
100 /*
101 * Save current->domain and set to local(caller) domain for
102 * xencomm_paddr_to_maddr() which calculates maddr from
103 * paddr using mpa value of current->domain.
104 */
105 struct domain *save;
106 save = current->domain;
107 current->domain = arg->domain;
108 if (xencomm_copy_to_guest((void*)arg->target,
109 sal_record, arg->ret, 0)) {
110 printk("SAL_GET_STATE_INFO can't copy to user!!!!\n");
111 arg->status = IA64_SAL_NO_INFORMATION_AVAILABLE;
112 arg->ret = 0;
113 }
114 /* Restore current->domain to saved value. */
115 current->domain = save;
116 }
117 spin_unlock_irqrestore(&sal_record_lock, flags);
118 }
120 void clear_state_info_on(void *data) {
121 struct smp_call_args_t *arg = data;
123 arg->ret = ia64_sal_clear_state_info(arg->type);
124 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) on CPU#%d returns %ld.\n",
125 rec_name[arg->type], smp_processor_id(), arg->ret);
127 }
129 struct sal_ret_values
130 sal_emulator (long index, unsigned long in1, unsigned long in2,
131 unsigned long in3, unsigned long in4, unsigned long in5,
132 unsigned long in6, unsigned long in7)
133 {
134 unsigned long r9 = 0;
135 unsigned long r10 = 0;
136 long r11 = 0;
137 long status;
139 status = 0;
140 switch (index) {
141 case SAL_FREQ_BASE:
142 if (!running_on_sim)
143 status = ia64_sal_freq_base(in1,&r9,&r10);
144 else switch (in1) {
145 case SAL_FREQ_BASE_PLATFORM:
146 r9 = 200000000;
147 break;
149 case SAL_FREQ_BASE_INTERVAL_TIMER:
150 r9 = 700000000;
151 break;
153 case SAL_FREQ_BASE_REALTIME_CLOCK:
154 r9 = 1;
155 break;
157 default:
158 status = -1;
159 break;
160 }
161 break;
162 case SAL_PCI_CONFIG_READ:
163 if (current->domain == dom0) {
164 u64 value;
165 // note that args 2&3 are swapped!!
166 status = ia64_sal_pci_config_read(in1,in3,in2,&value);
167 r9 = value;
168 }
169 else
170 printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_READ\n");
171 break;
172 case SAL_PCI_CONFIG_WRITE:
173 if (current->domain == dom0) {
174 if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
175 (in4 > 1) ||
176 (in2 > 8) || (in2 & (in2-1)))
177 printk("*** SAL_PCI_CONF_WRITE?!?(adr=0x%lx,typ=0x%lx,sz=0x%lx,val=0x%lx)\n",
178 in1,in4,in2,in3);
179 // note that args are in a different order!!
180 status = ia64_sal_pci_config_write(in1,in4,in2,in3);
181 }
182 else
183 printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_WRITE\n");
184 break;
185 case SAL_SET_VECTORS:
186 if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
187 if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
188 /* Sanity check: cs_length1 must be 0,
189 second vector is reserved. */
190 status = -2;
191 }
192 else {
193 struct domain *d = current->domain;
194 d->arch.sal_data->boot_rdv_ip = in2;
195 d->arch.sal_data->boot_rdv_r1 = in3;
196 }
197 }
198 else
199 {
200 if (in1 > sizeof(sal_vectors)/sizeof(sal_vectors[0])-1)
201 BUG();
202 sal_vectors[in1].vector_type = in1;
203 sal_vectors[in1].handler_addr1 = in2;
204 sal_vectors[in1].gp1 = in3;
205 sal_vectors[in1].handler_len1 = in4;
206 sal_vectors[in1].handler_addr2 = in5;
207 sal_vectors[in1].gp2 = in6;
208 sal_vectors[in1].handler_len2 = in7;
209 }
210 break;
211 case SAL_GET_STATE_INFO:
212 if (current->domain == dom0) {
213 sal_queue_entry_t *e;
214 unsigned long flags;
215 struct smp_call_args_t arg;
217 spin_lock_irqsave(&sal_queue_lock, flags);
218 if (!sal_queue || list_empty(&sal_queue[in1])) {
219 sal_log_record_header_t header;
220 XEN_GUEST_HANDLE(void) handle =
221 *(XEN_GUEST_HANDLE(void)*)&in3;
223 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) "
224 "no sal_queue entry found.\n",
225 rec_name[in1]);
226 memset(&header, 0, sizeof(header));
228 if (copy_to_guest(handle, &header, 1)) {
229 printk("sal_emulator: "
230 "SAL_GET_STATE_INFO can't copy "
231 "empty header to user: 0x%lx\n",
232 in3);
233 }
234 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
235 r9 = 0;
236 spin_unlock_irqrestore(&sal_queue_lock, flags);
237 break;
238 }
239 e = list_entry(sal_queue[in1].next,
240 sal_queue_entry_t, list);
241 spin_unlock_irqrestore(&sal_queue_lock, flags);
243 IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s <= %s) "
244 "on CPU#%d.\n",
245 rec_name[e->sal_info_type],
246 rec_name[in1], e->cpuid);
248 arg.type = e->sal_info_type;
249 arg.target = in3;
250 arg.corrected = !!((in1 != e->sal_info_type) &&
251 (e->sal_info_type == SAL_INFO_TYPE_MCA));
252 arg.domain = current->domain;
253 arg.status = 0;
255 if (e->cpuid == smp_processor_id()) {
256 IA64_SAL_DEBUG("SAL_GET_STATE_INFO: local\n");
257 get_state_info_on(&arg);
258 } else {
259 int ret;
260 IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
261 ret = smp_call_function_single(e->cpuid,
262 get_state_info_on,
263 &arg, 0, 1);
264 if (ret < 0) {
265 printk("SAL_GET_STATE_INFO "
266 "smp_call_function_single error:"
267 " %d\n", ret);
268 arg.ret = 0;
269 arg.status =
270 IA64_SAL_NO_INFORMATION_AVAILABLE;
271 }
272 }
273 r9 = arg.ret;
274 status = arg.status;
275 if (r9 == 0) {
276 spin_lock_irqsave(&sal_queue_lock, flags);
277 list_del(&e->list);
278 spin_unlock_irqrestore(&sal_queue_lock, flags);
279 xfree(e);
280 }
281 } else {
282 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
283 r9 = 0;
284 }
285 break;
286 case SAL_GET_STATE_INFO_SIZE:
287 r9 = ia64_sal_get_state_info_size(in1);
288 break;
289 case SAL_CLEAR_STATE_INFO:
290 if (current->domain == dom0) {
291 sal_queue_entry_t *e;
292 unsigned long flags;
293 struct smp_call_args_t arg;
295 spin_lock_irqsave(&sal_queue_lock, flags);
296 if (list_empty(&sal_queue[in1])) {
297 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s) "
298 "no sal_queue entry found.\n",
299 rec_name[in1]);
300 status = IA64_SAL_NO_INFORMATION_AVAILABLE;
301 r9 = 0;
302 spin_unlock_irqrestore(&sal_queue_lock, flags);
303 break;
304 }
305 e = list_entry(sal_queue[in1].next,
306 sal_queue_entry_t, list);
308 list_del(&e->list);
309 spin_unlock_irqrestore(&sal_queue_lock, flags);
311 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO(%s <= %s) "
312 "on CPU#%d.\n",
313 rec_name[e->sal_info_type],
314 rec_name[in1], e->cpuid);
317 arg.type = e->sal_info_type;
318 arg.status = 0;
319 if (e->cpuid == smp_processor_id()) {
320 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: local\n");
321 clear_state_info_on(&arg);
322 } else {
323 int ret;
324 IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: remote\n");
325 ret = smp_call_function_single(e->cpuid,
326 clear_state_info_on, &arg, 0, 1);
327 if (ret < 0) {
328 printk("sal_emulator: "
329 "SAL_CLEAR_STATE_INFO "
330 "smp_call_function_single error:"
331 " %d\n", ret);
332 arg.ret = 0;
333 arg.status =
334 IA64_SAL_NO_INFORMATION_AVAILABLE;
335 }
336 }
337 r9 = arg.ret;
338 status = arg.status;
339 xfree(e);
340 }
341 break;
342 case SAL_MC_RENDEZ:
343 printk("*** CALLED SAL_MC_RENDEZ. IGNORED...\n");
344 break;
345 case SAL_MC_SET_PARAMS:
346 if (in1 > sizeof(sal_mc_params)/sizeof(sal_mc_params[0]))
347 BUG();
348 sal_mc_params[in1].param_type = in1;
349 sal_mc_params[in1].i_or_m = in2;
350 sal_mc_params[in1].i_or_m_val = in3;
351 sal_mc_params[in1].timeout = in4;
352 sal_mc_params[in1].rz_always = in5;
353 break;
354 case SAL_CACHE_FLUSH:
355 if (1) {
356 /* Flush using SAL.
357 This method is faster but has a side effect on
358 other vcpu running on this cpu. */
359 status = ia64_sal_cache_flush (in1);
360 }
361 else {
362 /* Flush with fc all the domain.
363 This method is slower but has no side effects. */
364 domain_cache_flush (current->domain, in1 == 4 ? 1 : 0);
365 status = 0;
366 }
367 break;
368 case SAL_CACHE_INIT:
369 printk("*** CALLED SAL_CACHE_INIT. IGNORED...\n");
370 break;
371 case SAL_UPDATE_PAL:
372 printk("*** CALLED SAL_UPDATE_PAL. IGNORED...\n");
373 break;
374 default:
375 printk("*** CALLED SAL_ WITH UNKNOWN INDEX. IGNORED...\n");
376 status = -1;
377 break;
378 }
379 return ((struct sal_ret_values) {status, r9, r10, r11});
380 }
/* CPUs whose caches are currently coherent with the instruction stream.
   A cpu is cleared from this mask when it still owes a deferred
   PAL_CACHE_FLUSH(cache_type=4) -- see the changeset description. */
cpumask_t cpu_cache_coherent_map;

/* Arguments marshalled to remote_pal_cache_flush() via smp_call_function(). */
struct cache_flush_args {
	u64 cache_type;  /* PAL cache-type selector (1..4) */
	u64 operation;   /* PAL_CACHE_FLUSH operation bits */
	u64 progress;    /* initial progress indicator, must be 0 */
	long status;     /* first non-zero PAL status seen on any cpu */
};
391 static void
392 remote_pal_cache_flush(void *v)
393 {
394 struct cache_flush_args *args = v;
395 long status;
396 u64 progress = args->progress;
398 status = ia64_pal_cache_flush(args->cache_type, args->operation,
399 &progress, NULL);
400 if (status != 0)
401 args->status = status;
402 }
404 struct ia64_pal_retval
405 xen_pal_emulator(unsigned long index, u64 in1, u64 in2, u64 in3)
406 {
407 unsigned long r9 = 0;
408 unsigned long r10 = 0;
409 unsigned long r11 = 0;
410 long status = PAL_STATUS_UNIMPLEMENTED;
412 if (running_on_sim)
413 return pal_emulator_static(index);
415 // pal code must be mapped by a TR when pal is called, however
416 // calls are rare enough that we will map it lazily rather than
417 // at every context switch
418 //efi_map_pal_code();
419 switch (index) {
420 case PAL_MEM_ATTRIB:
421 status = ia64_pal_mem_attrib(&r9);
422 break;
423 case PAL_FREQ_BASE:
424 status = ia64_pal_freq_base(&r9);
425 if (status == PAL_STATUS_UNIMPLEMENTED) {
426 status = ia64_sal_freq_base(0, &r9, &r10);
427 r10 = 0;
428 }
429 break;
430 case PAL_PROC_GET_FEATURES:
431 status = ia64_pal_proc_get_features(&r9,&r10,&r11);
432 break;
433 case PAL_BUS_GET_FEATURES:
434 status = ia64_pal_bus_get_features(
435 (pal_bus_features_u_t *) &r9,
436 (pal_bus_features_u_t *) &r10,
437 (pal_bus_features_u_t *) &r11);
438 break;
439 case PAL_FREQ_RATIOS:
440 status = ia64_pal_freq_ratios(
441 (struct pal_freq_ratio *) &r9,
442 (struct pal_freq_ratio *) &r10,
443 (struct pal_freq_ratio *) &r11);
444 break;
445 case PAL_PTCE_INFO:
446 {
447 // return hard-coded xen-specific values because ptc.e
448 // is emulated on xen to always flush everything
449 // these values result in only one ptc.e instruction
450 status = 0; r9 = 0; r10 = (1L << 32) | 1L; r11 = 0;
451 }
452 break;
453 case PAL_VERSION:
454 status = ia64_pal_version(
455 (pal_version_u_t *) &r9,
456 (pal_version_u_t *) &r10);
457 break;
458 case PAL_VM_PAGE_SIZE:
459 status = ia64_pal_vm_page_size(&r9,&r10);
460 break;
461 case PAL_DEBUG_INFO:
462 status = ia64_pal_debug_info(&r9,&r10);
463 break;
464 case PAL_CACHE_SUMMARY:
465 status = ia64_pal_cache_summary(&r9,&r10);
466 break;
467 case PAL_VM_SUMMARY:
468 if (VMX_DOMAIN(current)) {
469 pal_vm_info_1_u_t v1;
470 pal_vm_info_2_u_t v2;
471 status = ia64_pal_vm_summary((pal_vm_info_1_u_t *)&v1,
472 (pal_vm_info_2_u_t *)&v2);
473 v1.pal_vm_info_1_s.max_itr_entry = NITRS - 1;
474 v1.pal_vm_info_1_s.max_dtr_entry = NDTRS - 1;
475 v2.pal_vm_info_2_s.impl_va_msb -= 1;
476 v2.pal_vm_info_2_s.rid_size =
477 current->domain->arch.rid_bits;
478 r9 = v1.pvi1_val;
479 r10 = v2.pvi2_val;
480 } else {
481 /* Use xen-specific values.
482 hash_tag_id is somewhat random! */
483 static const pal_vm_info_1_u_t v1 =
484 {.pal_vm_info_1_s =
485 { .vw = 1,
486 .phys_add_size = 44,
487 .key_size = 16,
488 .max_pkr = 15,
489 .hash_tag_id = 0x30,
490 .max_dtr_entry = NDTRS - 1,
491 .max_itr_entry = NITRS - 1,
492 #ifdef VHPT_GLOBAL
493 .max_unique_tcs = 3,
494 .num_tc_levels = 2
495 #else
496 .max_unique_tcs = 2,
497 .num_tc_levels = 1
498 #endif
499 }};
500 pal_vm_info_2_u_t v2;
501 v2.pvi2_val = 0;
502 v2.pal_vm_info_2_s.rid_size =
503 current->domain->arch.rid_bits;
504 v2.pal_vm_info_2_s.impl_va_msb = 50;
505 r9 = v1.pvi1_val;
506 r10 = v2.pvi2_val;
507 status = PAL_STATUS_SUCCESS;
508 }
509 break;
510 case PAL_VM_INFO:
511 if (VMX_DOMAIN(current)) {
512 status = ia64_pal_vm_info(in1, in2,
513 (pal_tc_info_u_t *)&r9, &r10);
514 break;
515 }
516 #ifdef VHPT_GLOBAL
517 if (in1 == 0 && in2 == 2) {
518 /* Level 1: VHPT */
519 const pal_tc_info_u_t v =
520 { .pal_tc_info_s = {.num_sets = 128,
521 .associativity = 1,
522 .num_entries = 128,
523 .pf = 1,
524 .unified = 1,
525 .reduce_tr = 0,
526 .reserved = 0}};
527 r9 = v.pti_val;
528 /* Only support PAGE_SIZE tc. */
529 r10 = PAGE_SIZE;
530 status = PAL_STATUS_SUCCESS;
531 }
532 #endif
533 else if (
534 #ifdef VHPT_GLOBAL
535 in1 == 1 /* Level 2. */
536 #else
537 in1 == 0 /* Level 1. */
538 #endif
539 && (in2 == 1 || in2 == 2))
540 {
541 /* itlb/dtlb, 1 entry. */
542 const pal_tc_info_u_t v =
543 { .pal_tc_info_s = {.num_sets = 1,
544 .associativity = 1,
545 .num_entries = 1,
546 .pf = 1,
547 .unified = 0,
548 .reduce_tr = 0,
549 .reserved = 0}};
550 r9 = v.pti_val;
551 /* Only support PAGE_SIZE tc. */
552 r10 = PAGE_SIZE;
553 status = PAL_STATUS_SUCCESS;
554 }
555 else
556 status = PAL_STATUS_EINVAL;
557 break;
558 case PAL_RSE_INFO:
559 status = ia64_pal_rse_info(
560 &r9,
561 (pal_hints_u_t *) &r10);
562 break;
563 case PAL_REGISTER_INFO:
564 status = ia64_pal_register_info(in1, &r9, &r10);
565 break;
566 case PAL_CACHE_FLUSH:
567 if (in3 != 0) /* Initially progress_indicator must be 0 */
568 panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
569 "progress_indicator=%lx", in3);
571 /* Always call Host Pal in int=0 */
572 in2 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
574 if (in1 != PAL_CACHE_TYPE_COHERENT) {
575 struct cache_flush_args args = {
576 .cache_type = in1,
577 .operation = in2,
578 .progress = 0,
579 .status = 0
580 };
581 smp_call_function(remote_pal_cache_flush,
582 (void *)&args, 1, 1);
583 if (args.status != 0)
584 panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
585 "remote status %lx", args.status);
586 }
588 /*
589 * Call Host PAL cache flush
590 * Clear psr.ic when call PAL_CACHE_FLUSH
591 */
592 r10 = in3;
593 status = ia64_pal_cache_flush(in1, in2, &r10, &r9);
595 if (status != 0)
596 panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
597 "status %lx", status);
599 if (in1 == PAL_CACHE_TYPE_COHERENT) {
600 int cpu = current->processor;
601 cpus_setall(current->arch.cache_coherent_map);
602 cpu_clear(cpu, current->arch.cache_coherent_map);
603 cpus_setall(cpu_cache_coherent_map);
604 cpu_clear(cpu, cpu_cache_coherent_map);
605 }
606 break;
607 case PAL_PERF_MON_INFO:
608 {
609 unsigned long pm_buffer[16];
610 status = ia64_pal_perf_mon_info(
611 pm_buffer,
612 (pal_perf_mon_info_u_t *) &r9);
613 if (status != 0) {
614 while(1)
615 printk("PAL_PERF_MON_INFO fails ret=%ld\n", status);
616 break;
617 }
618 if (copy_to_user((void __user *)in1,pm_buffer,128)) {
619 while(1)
620 printk("xen_pal_emulator: PAL_PERF_MON_INFO "
621 "can't copy to user!!!!\n");
622 status = PAL_STATUS_UNIMPLEMENTED;
623 break;
624 }
625 }
626 break;
627 case PAL_CACHE_INFO:
628 {
629 pal_cache_config_info_t ci;
630 status = ia64_pal_cache_config_info(in1,in2,&ci);
631 if (status != 0) break;
632 r9 = ci.pcci_info_1.pcci1_data;
633 r10 = ci.pcci_info_2.pcci2_data;
634 }
635 break;
636 case PAL_VM_TR_READ: /* FIXME: vcpu_get_tr?? */
637 printk("PAL_VM_TR_READ NOT IMPLEMENTED, IGNORED!\n");
638 break;
639 case PAL_HALT_INFO:
640 {
641 /* 1000 cycles to enter/leave low power state,
642 consumes 10 mW, implemented and cache/TLB coherent. */
643 unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
644 | (1UL << 61) | (1UL << 60);
645 if (copy_to_user ((void *)in1, &res, sizeof (res)))
646 status = PAL_STATUS_EINVAL;
647 else
648 status = PAL_STATUS_SUCCESS;
649 }
650 break;
651 case PAL_HALT:
652 if (current->domain == dom0) {
653 printk ("Domain0 halts the machine\n");
654 console_start_sync();
655 (*efi.reset_system)(EFI_RESET_SHUTDOWN,0,0,NULL);
656 } else {
657 set_bit(_VCPUF_down, &current->vcpu_flags);
658 vcpu_sleep_nosync(current);
659 status = PAL_STATUS_SUCCESS;
660 }
661 break;
662 case PAL_HALT_LIGHT:
663 if (VMX_DOMAIN(current)) {
664 /* Called by VTI. */
665 if (!is_unmasked_irq(current)) {
666 do_sched_op_compat(SCHEDOP_block, 0);
667 do_softirq();
668 }
669 status = PAL_STATUS_SUCCESS;
670 }
671 break;
672 case PAL_PLATFORM_ADDR:
673 if (VMX_DOMAIN(current))
674 status = PAL_STATUS_SUCCESS;
675 break;
676 case PAL_LOGICAL_TO_PHYSICAL:
677 /* Optional, no need to complain about being unimplemented */
678 break;
679 default:
680 printk("xen_pal_emulator: UNIMPLEMENTED PAL CALL %lu!!!!\n",
681 index);
682 break;
683 }
684 return ((struct ia64_pal_retval) {status, r9, r10, r11});
685 }
687 // given a current domain (virtual or metaphysical) address, return the virtual address
688 static unsigned long
689 efi_translate_domain_addr(unsigned long domain_addr, IA64FAULT *fault,
690 struct page_info** page)
691 {
692 struct vcpu *v = current;
693 unsigned long mpaddr = domain_addr;
694 unsigned long virt;
695 *fault = IA64_NO_FAULT;
697 again:
698 if (v->domain->arch.sal_data->efi_virt_mode) {
699 *fault = vcpu_tpa(v, domain_addr, &mpaddr);
700 if (*fault != IA64_NO_FAULT) return 0;
701 }
703 virt = (unsigned long)domain_mpa_to_imva(v->domain, mpaddr);
704 *page = virt_to_page(virt);
705 if (get_page(*page, current->domain) == 0) {
706 if (page_get_owner(*page) != current->domain) {
707 // which code is appropriate?
708 *fault = IA64_FAULT;
709 return 0;
710 }
711 goto again;
712 }
714 return virt;
715 }
717 static efi_status_t
718 efi_emulate_get_time(
719 unsigned long tv_addr, unsigned long tc_addr,
720 IA64FAULT *fault)
721 {
722 unsigned long tv, tc = 0;
723 struct page_info *tv_page = NULL;
724 struct page_info *tc_page = NULL;
725 efi_status_t status = 0;
726 efi_time_t *tvp;
727 struct tm timeptr;
728 unsigned long xtimesec;
730 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
731 if (*fault != IA64_NO_FAULT)
732 goto errout;
733 if (tc_addr) {
734 tc = efi_translate_domain_addr(tc_addr, fault, &tc_page);
735 if (*fault != IA64_NO_FAULT)
736 goto errout;
737 }
739 spin_lock(&efi_time_services_lock);
740 status = (*efi.get_time)((efi_time_t *) tv, (efi_time_cap_t *) tc);
741 tvp = (efi_time_t *)tv;
742 xtimesec = mktime(tvp->year, tvp->month, tvp->day, tvp->hour,
743 tvp->minute, tvp->second);
744 xtimesec += current->domain->time_offset_seconds;
745 timeptr = gmtime(xtimesec);
746 tvp->second = timeptr.tm_sec;
747 tvp->minute = timeptr.tm_min;
748 tvp->hour = timeptr.tm_hour;
749 tvp->day = timeptr.tm_mday;
750 tvp->month = timeptr.tm_mon + 1;
751 tvp->year = timeptr.tm_year + 1900;
752 spin_unlock(&efi_time_services_lock);
754 errout:
755 if (tc_page != NULL)
756 put_page(tc_page);
757 if (tv_page != NULL)
758 put_page(tv_page);
760 return status;
761 }
763 static efi_status_t
764 efi_emulate_set_time(
765 unsigned long tv_addr, IA64FAULT *fault)
766 {
767 unsigned long tv;
768 struct page_info *tv_page = NULL;
769 efi_status_t status = 0;
771 if (current->domain != dom0)
772 return EFI_UNSUPPORTED;
774 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
775 if (*fault != IA64_NO_FAULT)
776 goto errout;
778 spin_lock(&efi_time_services_lock);
779 status = (*efi.set_time)((efi_time_t *)tv);
780 spin_unlock(&efi_time_services_lock);
782 errout:
783 if (tv_page != NULL)
784 put_page(tv_page);
786 return status;
787 }
789 static efi_status_t
790 efi_emulate_get_wakeup_time(
791 unsigned long e_addr, unsigned long p_addr,
792 unsigned long tv_addr, IA64FAULT *fault)
793 {
794 unsigned long enabled, pending, tv;
795 struct page_info *e_page = NULL, *p_page = NULL,
796 *tv_page = NULL;
797 efi_status_t status = 0;
799 if (current->domain != dom0)
800 return EFI_UNSUPPORTED;
802 if (!e_addr || !p_addr || !tv_addr)
803 return EFI_INVALID_PARAMETER;
805 enabled = efi_translate_domain_addr(e_addr, fault, &e_page);
806 if (*fault != IA64_NO_FAULT)
807 goto errout;
808 pending = efi_translate_domain_addr(p_addr, fault, &p_page);
809 if (*fault != IA64_NO_FAULT)
810 goto errout;
811 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
812 if (*fault != IA64_NO_FAULT)
813 goto errout;
815 spin_lock(&efi_time_services_lock);
816 status = (*efi.get_wakeup_time)((efi_bool_t *)enabled,
817 (efi_bool_t *)pending,
818 (efi_time_t *)tv);
819 spin_unlock(&efi_time_services_lock);
821 errout:
822 if (e_page != NULL)
823 put_page(e_page);
824 if (p_page != NULL)
825 put_page(p_page);
826 if (tv_page != NULL)
827 put_page(tv_page);
829 return status;
830 }
832 static efi_status_t
833 efi_emulate_set_wakeup_time(
834 unsigned long enabled, unsigned long tv_addr,
835 IA64FAULT *fault)
836 {
837 unsigned long tv = 0;
838 struct page_info *tv_page = NULL;
839 efi_status_t status = 0;
841 if (current->domain != dom0)
842 return EFI_UNSUPPORTED;
844 if (tv_addr) {
845 tv = efi_translate_domain_addr(tv_addr, fault, &tv_page);
846 if (*fault != IA64_NO_FAULT)
847 goto errout;
848 }
850 spin_lock(&efi_time_services_lock);
851 status = (*efi.set_wakeup_time)((efi_bool_t)enabled,
852 (efi_time_t *)tv);
853 spin_unlock(&efi_time_services_lock);
855 errout:
856 if (tv_page != NULL)
857 put_page(tv_page);
859 return status;
860 }
862 static efi_status_t
863 efi_emulate_get_variable(
864 unsigned long name_addr, unsigned long vendor_addr,
865 unsigned long attr_addr, unsigned long data_size_addr,
866 unsigned long data_addr, IA64FAULT *fault)
867 {
868 unsigned long name, vendor, attr = 0, data_size, data;
869 struct page_info *name_page = NULL, *vendor_page = NULL,
870 *attr_page = NULL, *data_size_page = NULL,
871 *data_page = NULL;
872 efi_status_t status = 0;
874 if (current->domain != dom0)
875 return EFI_UNSUPPORTED;
877 name = efi_translate_domain_addr(name_addr, fault, &name_page);
878 if (*fault != IA64_NO_FAULT)
879 goto errout;
880 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
881 if (*fault != IA64_NO_FAULT)
882 goto errout;
883 data_size = efi_translate_domain_addr(data_size_addr, fault,
884 &data_size_page);
885 if (*fault != IA64_NO_FAULT)
886 goto errout;
887 data = efi_translate_domain_addr(data_addr, fault, &data_page);
888 if (*fault != IA64_NO_FAULT)
889 goto errout;
890 if (attr_addr) {
891 attr = efi_translate_domain_addr(attr_addr, fault, &attr_page);
892 if (*fault != IA64_NO_FAULT)
893 goto errout;
894 }
896 status = (*efi.get_variable)((efi_char16_t *)name,
897 (efi_guid_t *)vendor,
898 (u32 *)attr,
899 (unsigned long *)data_size,
900 (void *)data);
902 errout:
903 if (name_page != NULL)
904 put_page(name_page);
905 if (vendor_page != NULL)
906 put_page(vendor_page);
907 if (attr_page != NULL)
908 put_page(attr_page);
909 if (data_size_page != NULL)
910 put_page(data_size_page);
911 if (data_page != NULL)
912 put_page(data_page);
914 return status;
915 }
917 static efi_status_t
918 efi_emulate_get_next_variable(
919 unsigned long name_size_addr, unsigned long name_addr,
920 unsigned long vendor_addr, IA64FAULT *fault)
921 {
922 unsigned long name_size, name, vendor;
923 struct page_info *name_size_page = NULL, *name_page = NULL,
924 *vendor_page = NULL;
925 efi_status_t status = 0;
927 if (current->domain != dom0)
928 return EFI_UNSUPPORTED;
930 name_size = efi_translate_domain_addr(name_size_addr, fault,
931 &name_size_page);
932 if (*fault != IA64_NO_FAULT)
933 goto errout;
934 name = efi_translate_domain_addr(name_addr, fault, &name_page);
935 if (*fault != IA64_NO_FAULT)
936 goto errout;
937 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
938 if (*fault != IA64_NO_FAULT)
939 goto errout;
941 status = (*efi.get_next_variable)((unsigned long *)name_size,
942 (efi_char16_t *)name,
943 (efi_guid_t *)vendor);
945 errout:
946 if (name_size_page != NULL)
947 put_page(name_size_page);
948 if (name_page != NULL)
949 put_page(name_page);
950 if (vendor_page != NULL)
951 put_page(vendor_page);
953 return status;
954 }
956 static efi_status_t
957 efi_emulate_set_variable(
958 unsigned long name_addr, unsigned long vendor_addr,
959 unsigned long attr, unsigned long data_size,
960 unsigned long data_addr, IA64FAULT *fault)
961 {
962 unsigned long name, vendor, data;
963 struct page_info *name_page = NULL, *vendor_page = NULL,
964 *data_page = NULL;
965 efi_status_t status = 0;
967 if (current->domain != dom0)
968 return EFI_UNSUPPORTED;
970 name = efi_translate_domain_addr(name_addr, fault, &name_page);
971 if (*fault != IA64_NO_FAULT)
972 goto errout;
973 vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
974 if (*fault != IA64_NO_FAULT)
975 goto errout;
976 data = efi_translate_domain_addr(data_addr, fault, &data_page);
977 if (*fault != IA64_NO_FAULT)
978 goto errout;
980 status = (*efi.set_variable)((efi_char16_t *)name,
981 (efi_guid_t *)vendor,
982 attr,
983 data_size,
984 (void *)data);
986 errout:
987 if (name_page != NULL)
988 put_page(name_page);
989 if (vendor_page != NULL)
990 put_page(vendor_page);
991 if (data_page != NULL)
992 put_page(data_page);
994 return status;
995 }
997 static efi_status_t
998 efi_emulate_set_virtual_address_map(
999 unsigned long memory_map_size, unsigned long descriptor_size,
1000 u32 descriptor_version, efi_memory_desc_t *virtual_map)
1002 void *efi_map_start, *efi_map_end, *p;
1003 efi_memory_desc_t entry, *md = &entry;
1004 u64 efi_desc_size;
1006 unsigned long *vfn;
1007 struct domain *d = current->domain;
1008 efi_runtime_services_t *efi_runtime = d->arch.efi_runtime;
1009 fpswa_interface_t *fpswa_inf = d->arch.fpswa_inf;
1011 if (descriptor_version != EFI_MEMDESC_VERSION) {
1012 printk ("efi_emulate_set_virtual_address_map: memory "
1013 "descriptor version unmatched (%d vs %d)\n",
1014 (int)descriptor_version, EFI_MEMDESC_VERSION);
1015 return EFI_INVALID_PARAMETER;
1018 if (descriptor_size != sizeof(efi_memory_desc_t)) {
1019 printk ("efi_emulate_set_virtual_address_map: memory descriptor size unmatched\n");
1020 return EFI_INVALID_PARAMETER;
1023 if (d->arch.sal_data->efi_virt_mode)
1024 return EFI_UNSUPPORTED;
1026 efi_map_start = virtual_map;
1027 efi_map_end = efi_map_start + memory_map_size;
1028 efi_desc_size = sizeof(efi_memory_desc_t);
1030 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
1031 if (copy_from_user(&entry, p, sizeof(efi_memory_desc_t))) {
1032 printk ("efi_emulate_set_virtual_address_map: copy_from_user() fault. addr=0x%p\n", p);
1033 return EFI_UNSUPPORTED;
1036 /* skip over non-PAL_CODE memory descriptors; EFI_RUNTIME is included in PAL_CODE. */
1037 if (md->type != EFI_PAL_CODE)
1038 continue;
1040 #define EFI_HYPERCALL_PATCH_TO_VIRT(tgt,call) \
1041 do { \
1042 vfn = (unsigned long *) domain_mpa_to_imva(d, tgt); \
1043 *vfn++ = FW_HYPERCALL_##call##_INDEX * 16UL + md->virt_addr; \
1044 *vfn++ = 0; \
1045 } while (0)
1047 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_time,EFI_GET_TIME);
1048 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_time,EFI_SET_TIME);
1049 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_wakeup_time,EFI_GET_WAKEUP_TIME);
1050 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_wakeup_time,EFI_SET_WAKEUP_TIME);
1051 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_virtual_address_map,EFI_SET_VIRTUAL_ADDRESS_MAP);
1052 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_variable,EFI_GET_VARIABLE);
1053 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_variable,EFI_GET_NEXT_VARIABLE);
1054 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->set_variable,EFI_SET_VARIABLE);
1055 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->get_next_high_mono_count,EFI_GET_NEXT_HIGH_MONO_COUNT);
1056 EFI_HYPERCALL_PATCH_TO_VIRT(efi_runtime->reset_system,EFI_RESET_SYSTEM);
1058 vfn = (unsigned long *) domain_mpa_to_imva(d, (unsigned long) fpswa_inf->fpswa);
1059 *vfn++ = FW_HYPERCALL_FPSWA_PATCH_INDEX * 16UL + md->virt_addr;
1060 *vfn = 0;
1061 fpswa_inf->fpswa = (void *) (FW_HYPERCALL_FPSWA_ENTRY_INDEX * 16UL + md->virt_addr);
1062 break;
1065 /* The virtual address map has been applied. */
1066 d->arch.sal_data->efi_virt_mode = 1;
1068 return EFI_SUCCESS;
/*
 * efi_emulator: dispatch an EFI runtime-services firmware hypercall
 * issued by the guest.
 *
 * regs->r2 holds the FW_HYPERCALL_EFI_* index; the call's arguments
 * are fetched from the guest's stacked registers r32..r36 via
 * vcpu_get_gr().  *fault is initialised to IA64_NO_FAULT and may be
 * overwritten by the efi_emulate_* helpers (e.g. on a bad guest
 * pointer).  Returns the EFI status code to hand back to the guest.
 */
efi_status_t
efi_emulator (struct pt_regs *regs, IA64FAULT *fault)
{
	struct vcpu *v = current;
	efi_status_t status;

	*fault = IA64_NO_FAULT;

	switch (regs->r2) {
	    case FW_HYPERCALL_EFI_RESET_SYSTEM:
		{
		/* Map the EFI reset type (r32) onto a Xen shutdown
		 * reason: EFI_RESET_SHUTDOWN powers off, everything
		 * else (cold/warm/unknown) reboots. */
		u8 reason;
		unsigned long val = vcpu_get_gr(v,32);
		switch (val)
		{
		case EFI_RESET_SHUTDOWN:
			reason = SHUTDOWN_poweroff;
			break;
		case EFI_RESET_COLD:
		case EFI_RESET_WARM:
		default:
			reason = SHUTDOWN_reboot;
			break;
		}
		domain_shutdown (current->domain, reason);
		}
		/* Only seen by the guest if the shutdown request does
		 * not take effect immediately. */
		status = EFI_UNSUPPORTED;
		break;
	    case FW_HYPERCALL_EFI_GET_TIME:
		status = efi_emulate_get_time (
			vcpu_get_gr(v,32),
			vcpu_get_gr(v,33),
			fault);
		break;
	    case FW_HYPERCALL_EFI_SET_TIME:
		status = efi_emulate_set_time (
			vcpu_get_gr(v,32),
			fault);
		break;
	    case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
		status = efi_emulate_get_wakeup_time (
			vcpu_get_gr(v,32),
			vcpu_get_gr(v,33),
			vcpu_get_gr(v,34),
			fault);
		break;
	    case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
		status = efi_emulate_set_wakeup_time (
			vcpu_get_gr(v,32),
			vcpu_get_gr(v,33),
			fault);
		break;
	    case FW_HYPERCALL_EFI_GET_VARIABLE:
		status = efi_emulate_get_variable (
			vcpu_get_gr(v,32),
			vcpu_get_gr(v,33),
			vcpu_get_gr(v,34),
			vcpu_get_gr(v,35),
			vcpu_get_gr(v,36),
			fault);
		break;
	    case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
		status = efi_emulate_get_next_variable (
			vcpu_get_gr(v,32),
			vcpu_get_gr(v,33),
			vcpu_get_gr(v,34),
			fault);
		break;
	    case FW_HYPERCALL_EFI_SET_VARIABLE:
		status = efi_emulate_set_variable (
			vcpu_get_gr(v,32),
			vcpu_get_gr(v,33),
			vcpu_get_gr(v,34),
			vcpu_get_gr(v,35),
			vcpu_get_gr(v,36),
			fault);
		break;
	    case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
		status = efi_emulate_set_virtual_address_map (
			vcpu_get_gr(v,32),
			vcpu_get_gr(v,33),
			(u32) vcpu_get_gr(v,34),
			(efi_memory_desc_t *) vcpu_get_gr(v,35));
		break;
	    case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
		// FIXME: need fixes in efi.h from 2.6.9
		status = EFI_UNSUPPORTED;
		break;
	    default:
		printk("unknown ia64 fw hypercall %lx\n", regs->r2);
		status = EFI_UNSUPPORTED;
	}

	return status;
}
1167 void
1168 do_ssc(unsigned long ssc, struct pt_regs *regs)
1170 unsigned long arg0, arg1, arg2, arg3, retval;
1171 char buf[2];
1172 /**/ static int last_fd, last_count; // FIXME FIXME FIXME
1173 /**/ // BROKEN FOR MULTIPLE DOMAINS & SMP
1174 /**/ struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
1176 arg0 = vcpu_get_gr(current,32);
1177 switch(ssc) {
1178 case SSC_PUTCHAR:
1179 buf[0] = arg0;
1180 buf[1] = '\0';
1181 printk(buf);
1182 break;
1183 case SSC_GETCHAR:
1184 retval = ia64_ssc(0,0,0,0,ssc);
1185 vcpu_set_gr(current,8,retval,0);
1186 break;
1187 case SSC_WAIT_COMPLETION:
1188 if (arg0) { // metaphysical address
1190 arg0 = translate_domain_mpaddr(arg0, NULL);
1191 /**/ stat = (struct ssc_disk_stat *)__va(arg0);
1192 ///**/ if (stat->fd == last_fd) stat->count = last_count;
1193 /**/ stat->count = last_count;
1194 //if (last_count >= PAGE_SIZE) printk("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
1195 ///**/ retval = ia64_ssc(arg0,0,0,0,ssc);
1196 /**/ retval = 0;
1198 else retval = -1L;
1199 vcpu_set_gr(current,8,retval,0);
1200 break;
1201 case SSC_OPEN:
1202 arg1 = vcpu_get_gr(current,33); // access rights
1203 if (!running_on_sim) { printk("SSC_OPEN, not implemented on hardware. (ignoring...)\n"); arg0 = 0; }
1204 if (arg0) { // metaphysical address
1205 arg0 = translate_domain_mpaddr(arg0, NULL);
1206 retval = ia64_ssc(arg0,arg1,0,0,ssc);
1208 else retval = -1L;
1209 vcpu_set_gr(current,8,retval,0);
1210 break;
1211 case SSC_WRITE:
1212 case SSC_READ:
1213 //if (ssc == SSC_WRITE) printk("DOING AN SSC_WRITE\n");
1214 arg1 = vcpu_get_gr(current,33);
1215 arg2 = vcpu_get_gr(current,34);
1216 arg3 = vcpu_get_gr(current,35);
1217 if (arg2) { // metaphysical address of descriptor
1218 struct ssc_disk_req *req;
1219 unsigned long mpaddr;
1220 long len;
1222 arg2 = translate_domain_mpaddr(arg2, NULL);
1223 req = (struct ssc_disk_req *) __va(arg2);
1224 req->len &= 0xffffffffL; // avoid strange bug
1225 len = req->len;
1226 /**/ last_fd = arg1;
1227 /**/ last_count = len;
1228 mpaddr = req->addr;
1229 //if (last_count >= PAGE_SIZE) printk("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
1230 retval = 0;
1231 if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
1232 // do partial page first
1233 req->addr = translate_domain_mpaddr(mpaddr, NULL);
1234 req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
1235 len -= req->len; mpaddr += req->len;
1236 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1237 arg3 += req->len; // file offset
1238 /**/ last_stat.fd = last_fd;
1239 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
1240 //if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
1242 if (retval >= 0) while (len > 0) {
1243 req->addr = translate_domain_mpaddr(mpaddr, NULL);
1244 req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
1245 len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
1246 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1247 arg3 += req->len; // file offset
1248 // TEMP REMOVED AGAIN arg3 += req->len; // file offset
1249 /**/ last_stat.fd = last_fd;
1250 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
1251 //if (last_count >= PAGE_SIZE) printk("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
1253 // set it back to the original value
1254 req->len = last_count;
1256 else retval = -1L;
1257 vcpu_set_gr(current,8,retval,0);
1258 //if (last_count >= PAGE_SIZE) printk("retval=%x\n",retval);
1259 break;
1260 case SSC_CONNECT_INTERRUPT:
1261 arg1 = vcpu_get_gr(current,33);
1262 arg2 = vcpu_get_gr(current,34);
1263 arg3 = vcpu_get_gr(current,35);
1264 if (!running_on_sim) { printk("SSC_CONNECT_INTERRUPT, not implemented on hardware. (ignoring...)\n"); break; }
1265 (void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
1266 break;
1267 case SSC_NETDEV_PROBE:
1268 vcpu_set_gr(current,8,-1L,0);
1269 break;
1270 default:
1271 panic_domain(regs,
1272 "%s: bad ssc code %lx, iip=0x%lx, b0=0x%lx\n",
1273 __func__, ssc, regs->cr_iip, regs->b0);
1274 break;
1276 vcpu_increment_iip(current);