
view xen/arch/x86/x86_64/entry.S @ 9262:c445d4a0dd76

Define a new sched_op hypercall called sched_op_new, which differs from the
legacy hypercall in that it takes a pointer to a block of extra arguments
rather than an opaque unsigned long. The old hypercall still exists, for
backwards compatibility.

The new hypercall supports new sub-command SCHEDOP_poll, which can be used to
wait on a set of event-channel ports with an optional timeout. This is exported
in XenLinux as HYPERVISOR_poll, and used in the pcifront driver to wait on a
response from the pciback driver.

Can also be used for debuggers. :-)
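
For illustration, a minimal sketch of how a frontend might use the XenLinux
wrapper. This is hypothetical and not part of the changeset: the exact
HYPERVISOR_poll prototype and header paths vary by tree; it is assumed here
to take a port array, a port count, and a timeout (0 meaning wait
indefinitely), returning 0 on success.

    /* Hypothetical usage sketch -- not part of this changeset. */
    #include <xen/interface/event_channel.h>  /* assumed path: evtchn_port_t */

    static int wait_for_backend(evtchn_port_t port, u64 timeout)
    {
        evtchn_port_t ports[1] = { port };

        /* Block this VCPU until `port` is pending or `timeout` expires;
         * 0 on success (assumed SCHEDOP_poll semantics). */
        return HYPERVISOR_poll(ports, 1, timeout);
    }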

Signed-off-by: Keir Fraser <keir@xensource.com>
Signed-off-by: John Levon <john.levon@sun.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Mar 14 19:33:45 2006 +0100 (2006-03-14)
parents 49c02a7a92dd
children 9bee4875a848
line source
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>
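
/*
 * Xen per-CPU stacks are STACK_SIZE-aligned, so the stack base can be
 * recovered by masking %rsp. GET_GUEST_REGS yields a pointer to the guest
 * register frame within the cpu_info block at the stack bottom;
 * GET_CURRENT loads the struct vcpu pointer stored in the final eight
 * bytes of that block.
 */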
#define GET_GUEST_REGS(reg)                     \
        movq $~(STACK_SIZE-1),reg;              \
        andq %rsp,reg;                          \
        orq  $(STACK_SIZE-CPUINFO_sizeof),reg;

#define GET_CURRENT(reg)                        \
        movq $STACK_SIZE-8, reg;                \
        orq  %rsp, reg;                         \
        andq $~7,reg;                           \
        movq (reg),reg;
        ALIGN
/* %rbx: struct vcpu */
switch_to_kernel:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_syscall_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $0,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events

/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
        RESTORE_ALL
        testw $TRAP_syscall,4(%rsp)
        jz    iret_exit_to_guest
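
        /* Frame was created by SYSCALL entry: return via SYSRET, using
         * SYSRETL if the guest runs with a 32-bit code segment. */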
        addq  $8,%rsp
        popq  %rcx                     # RIP
        popq  %r11                     # CS
        cmpw  $__GUEST_CS32,%r11
        popq  %r11                     # RFLAGS
        popq  %rsp                     # RSP
        je    1f
        sysretq
1:      sysretl

        ALIGN
/* No special register assumptions. */
iret_exit_to_guest:
        addq  $8,%rsp
FLT1:   iretq
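
/*
 * If the iretq above faults, FIX1 (found via __pre_ex_table) rebuilds an
 * exception frame on the Xen stack with DBLFLT1 as the apparent faulting
 * address and re-enters via error_code. The __ex_table entry for DBLFLT1
 * then routes the fault to failsafe_callback, which bounces the guest to
 * its registered failsafe handler.
 */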
.section .fixup,"ax"
FIX1:   popq  -15*8-8(%rsp)            # error_code/entry_vector
        SAVE_ALL                       # 15*8 bytes pushed
        movq  -8(%rsp),%rsi            # error_code/entry_vector
        sti                            # after stack abuse (-1024(%rsp))
        pushq $__HYPERVISOR_DS         # SS
        leaq  8(%rsp),%rax
        pushq %rax                     # RSP
        pushf                          # RFLAGS
        pushq $__HYPERVISOR_CS         # CS
        leaq  DBLFLT1(%rip),%rax
        pushq %rax                     # RIP
        pushq %rsi                     # error_code/entry_vector
        jmp   error_code
DBLFLT1:GET_CURRENT(%rbx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_failsafe_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .quad FLT1,FIX1
.previous
.section __ex_table,"a"
        .quad DBLFLT1,failsafe_callback
.previous

        ALIGN
/* No special register assumptions. */
restore_all_xen:
        RESTORE_ALL
        addq  $8,%rsp
        iretq

/*
 * When entering SYSCALL from kernel mode:
 *  %rax                             = hypercall vector
 *  %rdi, %rsi, %rdx, %r10, %r8, %r9 = hypercall arguments
 *  %r11, %rcx                       = SYSCALL-saved %rflags and %rip
 *  NB. We must move %r10 to %rcx for C function-calling ABI.
 *
 * When entering SYSCALL from user mode:
 *  Vector directly to the registered arch.syscall_addr.
 *
 * Initial work is done by per-CPU stack trampolines. At this point %rsp
 * has been initialised to point at the correct Xen stack, and %rsp, %rflags
 * and %cs have been saved. All other registers are still to be saved onto
 * the stack, starting with %rip, and an appropriate %ss must be saved into
 * the space left by the trampoline.
 */
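/*
 * (For reference, a hypothetical guest-side sequence matching the
 *  kernel-mode convention above -- constant names are illustrative:
 *      movq  $__HYPERVISOR_sched_op_new,%rax
 *      movq  $SCHEDOP_poll,%rdi
 *      leaq  poll_args(%rip),%rsi
 *      syscall
 *  with further arguments, if any, in %rdx, %r10, %r8, %r9.)
 */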
        ALIGN
ENTRY(syscall_enter)
        sti
        movl  $__GUEST_SS,24(%rsp)
        pushq %rcx
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jz    switch_to_kernel

/*hypercall:*/
        movq  %r10,%rcx
        andq  $(NR_hypercalls-1),%rax
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushq %rdi; pushq %rsi; pushq %rdx; pushq %rcx; pushq %r8 ; pushq %r9
        leaq  hypercall_args_table(%rip),%r10
        movq  $6,%rcx
        sub   (%r10,%rax,1),%cl
        movq  %rsp,%rdi
        movl  $0xDEADBEEF,%eax
        rep   stosq
        popq  %r9 ; popq  %r8 ; popq  %rcx; popq  %rdx; popq  %rsi; popq  %rdi
        movq  UREGS_rax(%rsp),%rax
        andq  $(NR_hypercalls-1),%rax
        pushq %rax
        pushq UREGS_rip+8(%rsp)
#endif
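        /* Dispatch: index hypercall_table by the (masked) vector in %rax. */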
        leaq  hypercall_table(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax)
        callq *(%r10,%rax,8)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        popq  %r10                     # Shadow RIP
        cmpq  %r10,UREGS_rip(%rsp)
        popq  %rcx                     # Shadow hypercall index
        jne   skip_clobber             /* If RIP has changed then don't clobber. */
        leaq  hypercall_args_table(%rip),%r10
        movb  (%r10,%rcx,1),%cl
        movl  $0xDEADBEEF,%r10d
        cmpb  $1,%cl; jb skip_clobber; movq %r10,UREGS_rdi(%rsp)
        cmpb  $2,%cl; jb skip_clobber; movq %r10,UREGS_rsi(%rsp)
        cmpb  $3,%cl; jb skip_clobber; movq %r10,UREGS_rdx(%rsp)
        cmpb  $4,%cl; jb skip_clobber; movq %r10,UREGS_r10(%rsp)
        cmpb  $5,%cl; jb skip_clobber; movq %r10,UREGS_r8(%rsp)
        cmpb  $6,%cl; jb skip_clobber; movq %r10,UREGS_r9(%rsp)
skip_clobber:
#endif
        movq  %rax,UREGS_rax(%rsp)     # save the return value

/* %rbx: struct vcpu */
test_all_events:
        cli                            # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   process_softirqs
        btr   $_VCPUF_nmi_pending,VCPU_flags(%rbx)
        jc    process_nmi
test_guest_events:
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,VCPUINFO_upcall_mask(%rax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%rax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_event_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
/* %rbx: struct vcpu */
process_softirqs:
        sti
        call  do_softirq
        jmp   test_all_events

        ALIGN
/* %rbx: struct vcpu */
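/* Deliver a pending NMI to the guest's registered handler. If an NMI is
 * already being handled (nmi_masked set), leave it pending and resume the
 * normal event checks. */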
process_nmi:
        movq  VCPU_nmi_addr(%rbx),%rax
        test  %rax,%rax
        jz    test_all_events
        bts   $_VCPUF_nmi_masked,VCPU_flags(%rbx)
        jc    1f
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events
1:      bts   $_VCPUF_nmi_pending,VCPU_flags(%rbx)
        jmp   test_guest_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK: */
/* { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS } */
/* %rdx: trap_bounce, %rbx: struct vcpu */
/* On return only %rbx is guaranteed non-clobbered. */
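/* The basic frame is five quadwords (RIP, CS/saved_upcall_mask, RFLAGS,
 * RSP, SS), hence the 40-byte adjustment below; the error code, failsafe
 * segment registers, and RCX/R11 extend it downwards when required. */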
create_bounce_frame:
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jnz   1f
        /* Push new frame at registered guest-OS stack base. */
        pushq %rdx
        movq  %rbx,%rdi
        call  toggle_guest_mode
        popq  %rdx
        movq  VCPU_kernel_sp(%rbx),%rsi
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movq  UREGS_rsp+8(%rsp),%rsi
        andb  $0xfc,UREGS_cs+8(%rsp)    # Indicate kernel context to guest.
2:      andq  $~0xf,%rsi                # Stack frames are 16-byte aligned.
        movq  $HYPERVISOR_VIRT_START,%rax
        cmpq  %rax,%rsi
        jb    1f                        # In +ve address space? Then okay.
        movq  $HYPERVISOR_VIRT_END+60,%rax
        cmpq  %rax,%rsi
        jb    domain_crash_synchronous  # Above Xen private area? Then okay.
1:      movb  TRAPBOUNCE_flags(%rdx),%cl
        subq  $40,%rsi
        movq  UREGS_ss+8(%rsp),%rax
FLT2:   movq  %rax,32(%rsi)             # SS
        movq  UREGS_rsp+8(%rsp),%rax
FLT3:   movq  %rax,24(%rsi)             # RSP
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shlq  $32,%rax                  # Bits 32-39: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax      # Bits 0-15: CS
FLT4:   movq  %rax,8(%rsi)              # CS / saved_upcall_mask
        shrq  $32,%rax
        testb $0xFF,%al                 # Bits 0-7: saved_upcall_mask
        setz  %ch                       # %ch == !saved_upcall_mask
        movq  UREGS_eflags+8(%rsp),%rax
        andq  $~X86_EFLAGS_IF,%rax
        shlb  $1,%ch                    # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                   # Fold EFLAGS.IF into %eax
FLT5:   movq  %rax,16(%rsi)             # RFLAGS
        movq  UREGS_rip+8(%rsp),%rax
FLT6:   movq  %rax,(%rsi)               # RIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subq  $8,%rsi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
FLT7:   movq  %rax,(%rsi)               # ERROR CODE
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subq  $32,%rsi
        movl  %gs,%eax
FLT8:   movq  %rax,24(%rsi)             # GS
        movl  %fs,%eax
FLT9:   movq  %rax,16(%rsi)             # FS
        movl  %es,%eax
FLT10:  movq  %rax,8(%rsi)              # ES
        movl  %ds,%eax
FLT11:  movq  %rax,(%rsi)               # DS
2:      subq  $16,%rsi
        movq  UREGS_r11+8(%rsp),%rax
FLT12:  movq  %rax,8(%rsi)              # R11
        movq  UREGS_rcx+8(%rsp),%rax
FLT13:  movq  %rax,(%rsi)               # RCX
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
        andl  $0xfffcbeff,UREGS_eflags+8(%rsp)
        movq  $__GUEST_SS,UREGS_ss+8(%rsp)
        movq  %rsi,UREGS_rsp+8(%rsp)
        movq  $__GUEST_CS,UREGS_cs+8(%rsp)
        movq  TRAPBOUNCE_eip(%rdx),%rax
        testq %rax,%rax
        jz    domain_crash_synchronous
        movq  %rax,UREGS_rip+8(%rsp)
        movb  $0,TRAPBOUNCE_flags(%rdx)
        ret
.section __ex_table,"a"
        .quad FLT2,domain_crash_synchronous  , FLT3,domain_crash_synchronous
        .quad FLT4,domain_crash_synchronous  , FLT5,domain_crash_synchronous
        .quad FLT6,domain_crash_synchronous  , FLT7,domain_crash_synchronous
        .quad FLT8,domain_crash_synchronous  , FLT9,domain_crash_synchronous
        .quad FLT10,domain_crash_synchronous , FLT11,domain_crash_synchronous
        .quad FLT12,domain_crash_synchronous , FLT13,domain_crash_synchronous
.previous

domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S\n"

domain_crash_synchronous:
        # Get out of the guest-save area of the stack.
        GET_GUEST_REGS(%rax)
        movq  %rax,%rsp
        # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
        orb   $3,UREGS_cs(%rsp)
        # printk(domain_crash_synchronous_string)
        leaq  domain_crash_synchronous_string(%rip),%rdi
        xorl  %eax,%eax
        call  printk
        jmp   __domain_crash_synchronous

        ALIGN
/* %rbx: struct vcpu */
process_guest_exception_and_events:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    test_all_events
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
/* No special register assumptions. */
ENTRY(ret_from_intr)
        GET_CURRENT(%rbx)
        testb $3,UREGS_cs(%rsp)
        jnz   test_all_events
        jmp   restore_all_xen

        ALIGN
/* No special register assumptions. */
error_code:
        SAVE_ALL
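        /* Re-enable interrupts only if the interrupted context had them on. */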
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
        jz    exception_with_ints_disabled
        sti
        movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        GET_CURRENT(%rbx)
        PERFC_INCR(PERFC_exceptions, %rax)
        callq *(%rdx,%rax,8)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        jmp   process_guest_exception_and_events

/* No special register assumptions. */
exception_with_ints_disabled:
        testb $3,UREGS_cs(%rsp)           # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        movq  %rsp,%rdi
        call  search_pre_exception_table
        testq %rax,%rax                   # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movq  %rax,UREGS_rip(%rsp)
        subq  $8,UREGS_rsp(%rsp)          # add ec/ev to previous stack frame
        testb $15,UREGS_rsp(%rsp)         # return %rsp is now aligned?
        jz    1f                          # then there is a pad quadword already
        movq  %rsp,%rsi
        subq  $8,%rsp
        movq  %rsp,%rdi
        movq  $UREGS_kernel_sizeof/8,%rcx
        rep;  movsq                       # make room for ec/ev
1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
        movq  %rax,UREGS_kernel_sizeof(%rsp)
        jmp   restore_all_xen             # return to fixup code

/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
        movl  UREGS_entry_vector(%rsp),%edi
        movq  %rsp,%rsi
        call  fatal_trap
        ud2

ENTRY(divide_error)
        pushq $0
        movl  $TRAP_divide_error,4(%rsp)
        jmp   error_code

ENTRY(coprocessor_error)
        pushq $0
        movl  $TRAP_copro_error,4(%rsp)
        jmp   error_code

ENTRY(simd_coprocessor_error)
        pushq $0
        movl  $TRAP_simd_error,4(%rsp)
        jmp   error_code

ENTRY(device_not_available)
        pushq $0
        movl  $TRAP_no_device,4(%rsp)
        jmp   error_code

ENTRY(debug)
        pushq $0
        movl  $TRAP_debug,4(%rsp)
        jmp   error_code

ENTRY(int3)
        pushq $0
        movl  $TRAP_int3,4(%rsp)
        jmp   error_code

ENTRY(overflow)
        pushq $0
        movl  $TRAP_overflow,4(%rsp)
        jmp   error_code

ENTRY(bounds)
        pushq $0
        movl  $TRAP_bounds,4(%rsp)
        jmp   error_code

ENTRY(invalid_op)
        pushq $0
        movl  $TRAP_invalid_op,4(%rsp)
        jmp   error_code

ENTRY(coprocessor_segment_overrun)
        pushq $0
        movl  $TRAP_copro_seg,4(%rsp)
        jmp   error_code

ENTRY(invalid_TSS)
        movl  $TRAP_invalid_tss,4(%rsp)
        jmp   error_code

ENTRY(segment_not_present)
        movl  $TRAP_no_segment,4(%rsp)
        jmp   error_code

ENTRY(stack_segment)
        movl  $TRAP_stack_error,4(%rsp)
        jmp   error_code

ENTRY(general_protection)
        movl  $TRAP_gp_fault,4(%rsp)
        jmp   error_code

ENTRY(alignment_check)
        movl  $TRAP_alignment_check,4(%rsp)
        jmp   error_code

ENTRY(page_fault)
        movl  $TRAP_page_fault,4(%rsp)
        jmp   error_code

ENTRY(machine_check)
        pushq $0
        movl  $TRAP_machine_check,4(%rsp)
        jmp   error_code

ENTRY(spurious_interrupt_bug)
        pushq $0
        movl  $TRAP_spurious_int,4(%rsp)
        jmp   error_code

ENTRY(double_fault)
        movl  $TRAP_double_fault,4(%rsp)
        jmp   error_code

ENTRY(nmi)
        pushq $0
        SAVE_ALL
        testb $3,UREGS_cs(%rsp)
        jz    nmi_in_hypervisor_mode
        /* Interrupted guest context. Copy the context to stack bottom. */
        GET_GUEST_REGS(%rbx)
        movl  $UREGS_kernel_sizeof/8,%ecx
1:      popq  %rax
        movq  %rax,(%rbx)
        addq  $8,%rbx
        loop  1b
        subq  $UREGS_kernel_sizeof,%rbx
        movq  %rbx,%rsp
nmi_in_hypervisor_mode:
        movq  %rsp,%rdi
        call  do_nmi
        jmp   ret_from_intr
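
/*
 * Assembly wrappers for the sched_op hypercalls: do_sched_op() and
 * do_sched_op_new() may context-switch away via schedule_tail() and never
 * return along the normal path, so the guest-visible return value
 * (0, success) is stored into the saved register frame up front.
 */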
do_arch_sched_op:
        # Ensure we return success even if we return via schedule_tail()
        xorl  %eax,%eax
        GET_GUEST_REGS(%r10)
        movq  %rax,UREGS_rax(%r10)
        jmp   do_sched_op

do_arch_sched_op_new:
        # Ensure we return success even if we return via schedule_tail()
        xorl  %eax,%eax
        GET_GUEST_REGS(%r10)
        movq  %rax,UREGS_rax(%r10)
        jmp   do_sched_op_new

.data

ENTRY(exception_table)
        .quad do_divide_error
        .quad do_debug
        .quad 0 # nmi
        .quad do_int3
        .quad do_overflow
        .quad do_bounds
        .quad do_invalid_op
        .quad math_state_restore
        .quad do_double_fault
        .quad do_coprocessor_segment_overrun
        .quad do_invalid_TSS
        .quad do_segment_not_present
        .quad do_stack_segment
        .quad do_general_protection
        .quad do_page_fault
        .quad do_spurious_interrupt_bug
        .quad do_coprocessor_error
        .quad do_alignment_check
        .quad do_machine_check
        .quad do_simd_coprocessor_error

ENTRY(hypercall_table)
        .quad do_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad do_set_gdt
        .quad do_stack_switch
        .quad do_set_callbacks
        .quad do_fpu_taskswitch     /*  5 */
        .quad do_arch_sched_op
        .quad do_dom0_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad do_update_descriptor  /* 10 */
        .quad do_ni_hypercall
        .quad do_memory_op
        .quad do_multicall
        .quad do_update_va_mapping
        .quad do_set_timer_op       /* 15 */
        .quad do_event_channel_op
        .quad do_xen_version
        .quad do_console_io
        .quad do_physdev_op
        .quad do_grant_table_op     /* 20 */
        .quad do_vm_assist
        .quad do_update_va_mapping_otherdomain
        .quad do_iret
        .quad do_vcpu_op
        .quad do_set_segment_base   /* 25 */
        .quad do_mmuext_op
        .quad do_acm_op
        .quad do_nmi_op
        .quad do_arch_sched_op_new
        .rept NR_hypercalls-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr

ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table */    /*  0 */
        .byte 4 /* do_mmu_update */
        .byte 2 /* do_set_gdt */
        .byte 2 /* do_stack_switch */
        .byte 3 /* do_set_callbacks */
        .byte 1 /* do_fpu_taskswitch */    /*  5 */
        .byte 2 /* do_arch_sched_op */
        .byte 1 /* do_dom0_op */
        .byte 2 /* do_set_debugreg */
        .byte 1 /* do_get_debugreg */
        .byte 2 /* do_update_descriptor */ /* 10 */
        .byte 0 /* do_ni_hypercall */
        .byte 2 /* do_memory_op */
        .byte 2 /* do_multicall */
        .byte 3 /* do_update_va_mapping */
        .byte 1 /* do_set_timer_op */      /* 15 */
        .byte 1 /* do_event_channel_op */
        .byte 2 /* do_xen_version */
        .byte 3 /* do_console_io */
        .byte 1 /* do_physdev_op */
        .byte 3 /* do_grant_table_op */    /* 20 */
        .byte 2 /* do_vm_assist */
        .byte 4 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret */
        .byte 3 /* do_vcpu_op */
        .byte 2 /* do_set_segment_base */  /* 25 */
        .byte 4 /* do_mmuext_op */
        .byte 1 /* do_acm_op */
        .byte 2 /* do_nmi_op */
        .byte 2 /* do_arch_sched_op_new */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr