
view xen/arch/x86/x86_64/traps.c @ 17965:14fd83fe71c3

Add a facility to get notification of domain suspend by event channel.
The event channel is notified when the domain transitions to the
suspended state, which can be much faster than raising VIRQ_DOM_EXC
and waiting for the notification to be propagated via xenstore.

No attempt is made here to prevent multiple subscribers (last one
wins), or to detect that the subscriber has gone away. Userspace tools
should take care.

Signed-off-by: Brendan Cully <brendan@cs.ubc.ca>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jul 04 12:00:24 2008 +0100 (2008-07-04)
parents 6b1795ee1b19
children d711529e3de1
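
For context, below is a minimal sketch of how a toolstack might consume this
facility: bind a local port to the suspend event channel the guest advertises,
subscribe that port with the hypervisor, and block until it fires. It assumes
2008-era libxc with integer handles, the xc_domain_subscribe_for_suspend()
wrapper for the new domctl, and the existing xc_evtchn_* helpers; the function
name wait_for_domain_suspend() and the error handling are illustrative
assumptions, not code from the tree.

/* Illustrative sketch only, not part of this file.  Assumes libxc with
 * integer handles; suspend_port is the unbound port the guest has
 * advertised for suspend notification (e.g. via xenstore). */
#include <xenctrl.h>

static int wait_for_domain_suspend(int xc_handle, domid_t domid,
                                   evtchn_port_t suspend_port)
{
    int xce, rc = -1;
    evtchn_port_t local;

    xce = xc_evtchn_open();
    if ( xce < 0 )
        return -1;

    /* Bind our end of the guest's suspend event channel. */
    local = xc_evtchn_bind_interdomain(xce, domid, suspend_port);
    if ( (int)local < 0 )
        goto out;

    /* Ask Xen to signal this channel when the domain suspends
     * (last subscriber wins, as the commit message notes). */
    if ( xc_domain_subscribe_for_suspend(xc_handle, domid, suspend_port) )
        goto out;

    /* Block until the notification arrives, then acknowledge it. */
    if ( xc_evtchn_pending(xce) == local )
    {
        xc_evtchn_unmask(xce, local);
        rc = 0;
    }

 out:
    xc_evtchn_close(xce);
    return rc;
}

Because the hypervisor keeps only the most recent subscription and never
notices a vanished subscriber, a toolstack that restarts should simply
re-subscribe before waiting again.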
line source
#include <xen/config.h>
#include <xen/version.h>
#include <xen/init.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/mm.h>
#include <xen/irq.h>
#include <xen/symbols.h>
#include <xen/console.h>
#include <xen/sched.h>
#include <xen/shutdown.h>
#include <xen/nmi.h>
#include <asm/current.h>
#include <asm/flushtlb.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/shared.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <public/callback.h>

asmlinkage void syscall_enter(void);
asmlinkage void sysenter_entry(void);
asmlinkage void compat_hypercall(void);
asmlinkage void int80_direct_trap(void);

static void print_xen_info(void)
{
    char taint_str[TAINT_STRING_MAX_LEN];
    char debug = 'n';

#ifndef NDEBUG
    debug = 'y';
#endif

    printk("----[ Xen-%d.%d%s x86_64 debug=%c %s ]----\n",
           xen_major_version(), xen_minor_version(), xen_extra_version(),
           debug, print_tainted(taint_str));
}

static void _show_registers(const struct cpu_user_regs *regs,
                            unsigned long crs[8], int guest_mode,
                            const char *context)
{
    printk("RIP: %04x:[<%016lx>]", regs->cs, regs->rip);
    if ( !guest_mode )
        print_symbol(" %s", regs->rip);
    printk("\nRFLAGS: %016lx CONTEXT: %s\n", regs->rflags, context);
    printk("rax: %016lx rbx: %016lx rcx: %016lx\n",
           regs->rax, regs->rbx, regs->rcx);
    printk("rdx: %016lx rsi: %016lx rdi: %016lx\n",
           regs->rdx, regs->rsi, regs->rdi);
    printk("rbp: %016lx rsp: %016lx r8: %016lx\n",
           regs->rbp, regs->rsp, regs->r8);
    printk("r9: %016lx r10: %016lx r11: %016lx\n",
           regs->r9, regs->r10, regs->r11);
    printk("r12: %016lx r13: %016lx r14: %016lx\n",
           regs->r12, regs->r13, regs->r14);
    printk("r15: %016lx cr0: %016lx cr4: %016lx\n",
           regs->r15, crs[0], crs[4]);
    printk("cr3: %016lx cr2: %016lx\n", crs[3], crs[2]);
    printk("ds: %04x es: %04x fs: %04x gs: %04x "
           "ss: %04x cs: %04x\n",
           regs->ds, regs->es, regs->fs,
           regs->gs, regs->ss, regs->cs);
}

void show_registers(struct cpu_user_regs *regs)
{
    struct cpu_user_regs fault_regs = *regs;
    unsigned long fault_crs[8];
    const char *context;
    struct vcpu *v = current;

    if ( is_hvm_vcpu(v) && guest_mode(regs) )
    {
        struct segment_register sreg;
        context = "hvm";
        fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0];
        fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2];
        fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3];
        fault_crs[4] = v->arch.hvm_vcpu.guest_cr[4];
        hvm_get_segment_register(v, x86_seg_cs, &sreg);
        fault_regs.cs = sreg.sel;
        hvm_get_segment_register(v, x86_seg_ds, &sreg);
        fault_regs.ds = sreg.sel;
        hvm_get_segment_register(v, x86_seg_es, &sreg);
        fault_regs.es = sreg.sel;
        hvm_get_segment_register(v, x86_seg_fs, &sreg);
        fault_regs.fs = sreg.sel;
        hvm_get_segment_register(v, x86_seg_gs, &sreg);
        fault_regs.gs = sreg.sel;
        hvm_get_segment_register(v, x86_seg_ss, &sreg);
        fault_regs.ss = sreg.sel;
    }
    else
    {
        if ( guest_mode(regs) )
        {
            context = "guest";
            fault_crs[2] = arch_get_cr2(v);
        }
        else
        {
            context = "hypervisor";
            fault_crs[2] = read_cr2();
        }

        fault_crs[0] = read_cr0();
        fault_crs[3] = read_cr3();
        fault_crs[4] = read_cr4();
        fault_regs.ds = read_segment_register(ds);
        fault_regs.es = read_segment_register(es);
        fault_regs.fs = read_segment_register(fs);
        fault_regs.gs = read_segment_register(gs);
    }

    print_xen_info();
    printk("CPU: %d\n", smp_processor_id());
    _show_registers(&fault_regs, fault_crs, guest_mode(regs), context);

    if ( this_cpu(ler_msr) && !guest_mode(regs) )
    {
        u64 from, to;
        rdmsrl(this_cpu(ler_msr), from);
        rdmsrl(this_cpu(ler_msr) + 1, to);
        printk("ler: %016lx -> %016lx\n", from, to);
    }
}

void vcpu_show_registers(const struct vcpu *v)
{
    const struct cpu_user_regs *regs = &v->arch.guest_context.user_regs;
    unsigned long crs[8];

    /* No need to handle HVM for now. */
    if ( is_hvm_vcpu(v) )
        return;

    crs[0] = v->arch.guest_context.ctrlreg[0];
    crs[2] = arch_get_cr2(v);
    crs[3] = pagetable_get_paddr(guest_kernel_mode(v, regs) ?
                                 v->arch.guest_table :
                                 v->arch.guest_table_user);
    crs[4] = v->arch.guest_context.ctrlreg[4];

    _show_registers(regs, crs, 1, "guest");
}

void show_page_walk(unsigned long addr)
{
    unsigned long pfn, mfn = read_cr3() >> PAGE_SHIFT;
    l4_pgentry_t l4e, *l4t;
    l3_pgentry_t l3e, *l3t;
    l2_pgentry_t l2e, *l2t;
    l1_pgentry_t l1e, *l1t;

    printk("Pagetable walk from %016lx:\n", addr);

    l4t = mfn_to_virt(mfn);
    l4e = l4t[l4_table_offset(addr)];
    mfn = l4e_get_pfn(l4e);
    pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
    printk(" L4[0x%03lx] = %"PRIpte" %016lx\n",
           l4_table_offset(addr), l4e_get_intpte(l4e), pfn);
    if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
        return;

    l3t = mfn_to_virt(mfn);
    l3e = l3t[l3_table_offset(addr)];
    mfn = l3e_get_pfn(l3e);
    pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
    printk(" L3[0x%03lx] = %"PRIpte" %016lx%s\n",
           l3_table_offset(addr), l3e_get_intpte(l3e), pfn,
           (l3e_get_flags(l3e) & _PAGE_PSE) ? " (PSE)" : "");
    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ||
         (l3e_get_flags(l3e) & _PAGE_PSE) )
        return;

    l2t = mfn_to_virt(mfn);
    l2e = l2t[l2_table_offset(addr)];
    mfn = l2e_get_pfn(l2e);
    pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
    printk(" L2[0x%03lx] = %"PRIpte" %016lx %s\n",
           l2_table_offset(addr), l2e_get_intpte(l2e), pfn,
           (l2e_get_flags(l2e) & _PAGE_PSE) ? "(PSE)" : "");
    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
         (l2e_get_flags(l2e) & _PAGE_PSE) )
        return;

    l1t = mfn_to_virt(mfn);
    l1e = l1t[l1_table_offset(addr)];
    mfn = l1e_get_pfn(l1e);
    pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
    printk(" L1[0x%03lx] = %"PRIpte" %016lx\n",
           l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
}

asmlinkage void double_fault(void);
asmlinkage void do_double_fault(struct cpu_user_regs *regs)
{
    unsigned int cpu, tr;

    asm volatile ( "str %0" : "=r" (tr) );
    cpu = ((tr >> 3) - __FIRST_TSS_ENTRY) >> 2;

    watchdog_disable();

    console_force_unlock();

    /* Find information saved during fault and dump it to the console. */
    printk("*** DOUBLE FAULT ***\n");
    print_xen_info();
    printk("CPU: %d\nRIP: %04x:[<%016lx>]",
           cpu, regs->cs, regs->rip);
    print_symbol(" %s", regs->rip);
    printk("\nRFLAGS: %016lx\n", regs->rflags);
    printk("rax: %016lx rbx: %016lx rcx: %016lx\n",
           regs->rax, regs->rbx, regs->rcx);
    printk("rdx: %016lx rsi: %016lx rdi: %016lx\n",
           regs->rdx, regs->rsi, regs->rdi);
    printk("rbp: %016lx rsp: %016lx r8: %016lx\n",
           regs->rbp, regs->rsp, regs->r8);
    printk("r9: %016lx r10: %016lx r11: %016lx\n",
           regs->r9, regs->r10, regs->r11);
    printk("r12: %016lx r13: %016lx r14: %016lx\n",
           regs->r12, regs->r13, regs->r14);
    printk("r15: %016lx cs: %016lx ss: %016lx\n",
           regs->r15, (long)regs->cs, (long)regs->ss);
    show_stack_overflow(cpu, regs->rsp);

    panic("DOUBLE FAULT -- system shutdown\n");
}

void toggle_guest_mode(struct vcpu *v)
{
    if ( is_pv_32bit_vcpu(v) )
        return;
    v->arch.flags ^= TF_kernel_mode;
    asm volatile ( "swapgs" );
    update_cr3(v);
#ifdef USER_MAPPINGS_ARE_GLOBAL
    /* Don't flush user global mappings from the TLB. Don't tick TLB clock. */
    asm volatile ( "mov %0, %%cr3" : : "r" (v->arch.cr3) : "memory" );
#else
    write_ptbase(v);
#endif
}

unsigned long do_iret(void)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    struct iret_context iret_saved;
    struct vcpu *v = current;

    if ( unlikely(copy_from_user(&iret_saved, (void *)regs->rsp,
                                 sizeof(iret_saved))) )
    {
        gdprintk(XENLOG_ERR, "Fault while reading IRET context from "
                 "guest stack\n");
        goto exit_and_crash;
    }

    /* Returning to user mode? */
    if ( (iret_saved.cs & 3) == 3 )
    {
        if ( unlikely(pagetable_is_null(v->arch.guest_table_user)) )
        {
            gdprintk(XENLOG_ERR, "Guest switching to user mode with no "
                     "user page tables\n");
            goto exit_and_crash;
        }
        toggle_guest_mode(v);
    }

    regs->rip = iret_saved.rip;
    regs->cs = iret_saved.cs | 3; /* force guest privilege */
    regs->rflags = (iret_saved.rflags & ~(EF_IOPL|EF_VM)) | EF_IE;
    regs->rsp = iret_saved.rsp;
    regs->ss = iret_saved.ss | 3; /* force guest privilege */

    if ( !(iret_saved.flags & VGCF_in_syscall) )
    {
        regs->entry_vector = 0;
        regs->r11 = iret_saved.r11;
        regs->rcx = iret_saved.rcx;
    }

    /* Restore affinity. */
    if ( !cpus_equal(v->cpu_affinity_tmp, v->cpu_affinity) )
        vcpu_set_affinity(v, &v->cpu_affinity_tmp);

    /* No longer in NMI context. */
    v->nmi_masked = 0;

    /* Restore upcall mask from supplied EFLAGS.IF. */
    vcpu_info(v, evtchn_upcall_mask) = !(iret_saved.rflags & EF_IE);

    /* Saved %rax gets written back to regs->rax in entry.S. */
    return iret_saved.rax;

 exit_and_crash:
    gdprintk(XENLOG_ERR, "Fatal error\n");
    domain_crash(v->domain);
    return 0;
}

static int write_stack_trampoline(
    char *stack, char *stack_bottom, uint16_t cs_seg)
{
    /* movq %rsp, saversp(%rip) */
    stack[0] = 0x48;
    stack[1] = 0x89;
    stack[2] = 0x25;
    *(u32 *)&stack[3] = (stack_bottom - &stack[7]) - 16;

    /* leaq saversp(%rip), %rsp */
    stack[7] = 0x48;
    stack[8] = 0x8d;
    stack[9] = 0x25;
    *(u32 *)&stack[10] = (stack_bottom - &stack[14]) - 16;

    /* pushq %r11 */
    stack[14] = 0x41;
    stack[15] = 0x53;

    /* pushq $<cs_seg> */
    stack[16] = 0x68;
    *(u32 *)&stack[17] = cs_seg;

    /* movq $syscall_enter,%r11 */
    stack[21] = 0x49;
    stack[22] = 0xbb;
    *(void **)&stack[23] = (void *)syscall_enter;

    /* jmpq *%r11 */
    stack[31] = 0x41;
    stack[32] = 0xff;
    stack[33] = 0xe3;

    return 34;
}

void __devinit subarch_percpu_traps_init(void)
{
    char *stack_bottom, *stack;
    int cpu = smp_processor_id();

    if ( cpu == 0 )
    {
        /* Specify dedicated interrupt stacks for NMI, #DF, and #MC. */
        set_intr_gate(TRAP_double_fault, &double_fault);
        idt_table[TRAP_double_fault].a |= IST_DF << 32;
        idt_table[TRAP_nmi].a |= IST_NMI << 32;
        idt_table[TRAP_machine_check].a |= IST_MCE << 32;

        /*
         * The 32-on-64 hypercall entry vector is only accessible from ring 1.
         * Also note that this is a trap gate, not an interrupt gate.
         */
        _set_gate(idt_table+HYPERCALL_VECTOR, 15, 1, &compat_hypercall);

        /* Fast trap for int80 (faster than taking the #GP-fixup path). */
        _set_gate(idt_table+0x80, 15, 3, &int80_direct_trap);
    }

    stack_bottom = (char *)get_stack_bottom();
    stack = (char *)((unsigned long)stack_bottom & ~(STACK_SIZE - 1));

    /* IST_MAX IST pages + 1 syscall page + 1 guard page + primary stack. */
    BUILD_BUG_ON((IST_MAX + 2) * PAGE_SIZE + PRIMARY_STACK_SIZE > STACK_SIZE);

    /* Machine Check handler has its own per-CPU 4kB stack. */
    init_tss[cpu].ist[IST_MCE] = (unsigned long)&stack[IST_MCE * PAGE_SIZE];

    /* Double-fault handler has its own per-CPU 4kB stack. */
    init_tss[cpu].ist[IST_DF] = (unsigned long)&stack[IST_DF * PAGE_SIZE];

    /* NMI handler has its own per-CPU 4kB stack. */
    init_tss[cpu].ist[IST_NMI] = (unsigned long)&stack[IST_NMI * PAGE_SIZE];

    /* Trampoline for SYSCALL entry from long mode. */
    stack = &stack[IST_MAX * PAGE_SIZE]; /* Skip the IST stacks. */
    wrmsrl(MSR_LSTAR, (unsigned long)stack);
    stack += write_stack_trampoline(stack, stack_bottom, FLAT_KERNEL_CS64);

    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
    {
        /* SYSENTER entry. */
        wrmsrl(MSR_IA32_SYSENTER_ESP, (unsigned long)stack_bottom);
        wrmsrl(MSR_IA32_SYSENTER_EIP, (unsigned long)sysenter_entry);
        wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS, 0);
    }

    /* Trampoline for SYSCALL entry from compatibility mode. */
    stack = (char *)L1_CACHE_ALIGN((unsigned long)stack);
    wrmsrl(MSR_CSTAR, (unsigned long)stack);
    stack += write_stack_trampoline(stack, stack_bottom, FLAT_USER_CS32);

    /* Common SYSCALL parameters. */
    wrmsr(MSR_STAR, 0, (FLAT_RING3_CS32<<16) | __HYPERVISOR_CS);
    wrmsr(MSR_SYSCALL_MASK, EF_VM|EF_RF|EF_NT|EF_DF|EF_IE|EF_TF, 0U);
}

void init_int80_direct_trap(struct vcpu *v)
{
    struct trap_info *ti = &v->arch.guest_context.trap_ctxt[0x80];
    struct trap_bounce *tb = &v->arch.int80_bounce;

    tb->flags = TBF_EXCEPTION;
    tb->cs = ti->cs;
    tb->eip = ti->address;

    if ( null_trap_bounce(v, tb) )
        tb->flags = 0;
}

static long register_guest_callback(struct callback_register *reg)
{
    long ret = 0;
    struct vcpu *v = current;

    if ( !is_canonical_address(reg->address) )
        return -EINVAL;

    switch ( reg->type )
    {
    case CALLBACKTYPE_event:
        v->arch.guest_context.event_callback_eip = reg->address;
        break;

    case CALLBACKTYPE_failsafe:
        v->arch.guest_context.failsafe_callback_eip = reg->address;
        if ( reg->flags & CALLBACKF_mask_events )
            set_bit(_VGCF_failsafe_disables_events,
                    &v->arch.guest_context.flags);
        else
            clear_bit(_VGCF_failsafe_disables_events,
                      &v->arch.guest_context.flags);
        break;

    case CALLBACKTYPE_syscall:
        v->arch.guest_context.syscall_callback_eip = reg->address;
        if ( reg->flags & CALLBACKF_mask_events )
            set_bit(_VGCF_syscall_disables_events,
                    &v->arch.guest_context.flags);
        else
            clear_bit(_VGCF_syscall_disables_events,
                      &v->arch.guest_context.flags);
        break;

    case CALLBACKTYPE_syscall32:
        v->arch.syscall32_callback_eip = reg->address;
        v->arch.syscall32_disables_events =
            !!(reg->flags & CALLBACKF_mask_events);
        break;

    case CALLBACKTYPE_sysenter:
        v->arch.sysenter_callback_eip = reg->address;
        v->arch.sysenter_disables_events =
            !!(reg->flags & CALLBACKF_mask_events);
        break;

    case CALLBACKTYPE_nmi:
        ret = register_guest_nmi_callback(reg->address);
        break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}

static long unregister_guest_callback(struct callback_unregister *unreg)
{
    long ret;

    switch ( unreg->type )
    {
    case CALLBACKTYPE_event:
    case CALLBACKTYPE_failsafe:
    case CALLBACKTYPE_syscall:
    case CALLBACKTYPE_syscall32:
    case CALLBACKTYPE_sysenter:
        ret = -EINVAL;
        break;

    case CALLBACKTYPE_nmi:
        ret = unregister_guest_nmi_callback();
        break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}

long do_callback_op(int cmd, XEN_GUEST_HANDLE(const_void) arg)
{
    long ret;

    switch ( cmd )
    {
    case CALLBACKOP_register:
    {
        struct callback_register reg;

        ret = -EFAULT;
        if ( copy_from_guest(&reg, arg, 1) )
            break;

        ret = register_guest_callback(&reg);
    }
    break;

    case CALLBACKOP_unregister:
    {
        struct callback_unregister unreg;

        ret = -EFAULT;
        if ( copy_from_guest(&unreg, arg, 1) )
            break;

        ret = unregister_guest_callback(&unreg);
    }
    break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}

long do_set_callbacks(unsigned long event_address,
                      unsigned long failsafe_address,
                      unsigned long syscall_address)
{
    struct callback_register event = {
        .type = CALLBACKTYPE_event,
        .address = event_address,
    };
    struct callback_register failsafe = {
        .type = CALLBACKTYPE_failsafe,
        .address = failsafe_address,
    };
    struct callback_register syscall = {
        .type = CALLBACKTYPE_syscall,
        .address = syscall_address,
    };

    register_guest_callback(&event);
    register_guest_callback(&failsafe);
    register_guest_callback(&syscall);

    return 0;
}

static void hypercall_page_initialise_ring3_kernel(void *hypercall_page)
{
    char *p;
    int i;

    /* Fill in all the transfer points with template machine code. */
    for ( i = 0; i < (PAGE_SIZE / 32); i++ )
    {
        p = (char *)(hypercall_page + (i * 32));
        *(u8  *)(p+ 0) = 0x51;    /* push %rcx */
        *(u16 *)(p+ 1) = 0x5341;  /* push %r11 */
        *(u8  *)(p+ 3) = 0xb8;    /* mov $<i>,%eax */
        *(u32 *)(p+ 4) = i;
        *(u16 *)(p+ 8) = 0x050f;  /* syscall */
        *(u16 *)(p+10) = 0x5b41;  /* pop %r11 */
        *(u8  *)(p+12) = 0x59;    /* pop %rcx */
        *(u8  *)(p+13) = 0xc3;    /* ret */
    }

    /*
     * HYPERVISOR_iret is special because it doesn't return and expects a
     * special stack frame. Guests jump at this transfer point instead of
     * calling it.
     */
    p = (char *)(hypercall_page + (__HYPERVISOR_iret * 32));
    *(u8  *)(p+ 0) = 0x51;    /* push %rcx */
    *(u16 *)(p+ 1) = 0x5341;  /* push %r11 */
    *(u8  *)(p+ 3) = 0x50;    /* push %rax */
    *(u8  *)(p+ 4) = 0xb8;    /* mov $__HYPERVISOR_iret,%eax */
    *(u32 *)(p+ 5) = __HYPERVISOR_iret;
    *(u16 *)(p+ 9) = 0x050f;  /* syscall */
}

600 #include "compat/traps.c"
602 void hypercall_page_initialise(struct domain *d, void *hypercall_page)
603 {
604 memset(hypercall_page, 0xCC, PAGE_SIZE);
605 if ( is_hvm_domain(d) )
606 hvm_hypercall_page_initialise(d, hypercall_page);
607 else if ( !is_pv_32bit_domain(d) )
608 hypercall_page_initialise_ring3_kernel(hypercall_page);
609 else
610 hypercall_page_initialise_ring1_kernel(hypercall_page);
611 }
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */