ia64/xen-unstable

changeset 4700:4a9c0eedfac7

bitkeeper revision 1.1389.1.9 (42715735MS5tAFkKmbN0nXUetfgXZQ)

Remove unnecessary SYMBOL_NAME* macros.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Apr 28 21:35:49 2005 +0000 (2005-04-28)
parents 495fc70ea1ec
children 75a775c40caf
files xen/arch/x86/boot/x86_32.S xen/arch/x86/boot/x86_64.S xen/arch/x86/trampoline.S xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_64/entry.S xen/include/asm-x86/config.h xen/include/asm-x86/x86_32/asm_defns.h xen/include/asm-x86/x86_64/asm_defns.h
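
The SYMBOL_NAME*() wrappers removed by this changeset are Linux-style linkage macros whose only purpose is to cope with toolchains that prefix C symbols with an underscore (a.out-era object formats). Xen is built only for ELF targets, where the wrappers are identity operations, so every use can be replaced by the bare symbol name. A minimal sketch of the historical definitions follows; the ELF branch matches the macros deleted from xen/include/asm-x86/config.h below, while the a.out branch is reconstructed for illustration only:

    #ifdef __ELF__
    /* ELF toolchains: C symbol "foo" is emitted as "foo". */
    #define SYMBOL_NAME(X)       X
    #define SYMBOL_NAME_STR(X)   #X
    #define SYMBOL_NAME_LABEL(X) X##:
    #else
    /* a.out-style toolchains: C symbol "foo" is emitted as "_foo". */
    #define SYMBOL_NAME(X)       _##X
    #define SYMBOL_NAME_STR(X)   "_"#X
    #define SYMBOL_NAME_LABEL(X) _##X##:
    #endif

With the wrappers gone, assembler sources reference symbols directly (for example "call printf" rather than "call SYMBOL_NAME(printf)"), and the quoted inline-asm builders stringify with the tree's plain STR() macro instead of SYMBOL_NAME_STR().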
line diff
     1.1 --- a/xen/arch/x86/boot/x86_32.S	Thu Apr 28 21:32:24 2005 +0000
     1.2 +++ b/xen/arch/x86/boot/x86_32.S	Thu Apr 28 21:35:49 2005 +0000
     1.3 @@ -24,10 +24,10 @@ bad_cpu_msg:
     1.4  not_multiboot_msg:
     1.5          .asciz "ERR: Not a Multiboot bootloader!"
     1.6  bad_cpu:
     1.7 -        mov     $SYMBOL_NAME(bad_cpu_msg)-__PAGE_OFFSET,%esi
     1.8 +        mov     $bad_cpu_msg-__PAGE_OFFSET,%esi
     1.9          jmp     print_err
    1.10  not_multiboot:
    1.11 -        mov     $SYMBOL_NAME(not_multiboot_msg)-__PAGE_OFFSET,%esi
    1.12 +        mov     $not_multiboot_msg-__PAGE_OFFSET,%esi
    1.13  print_err:
    1.14          mov     $0xB8000,%edi  # VGA framebuffer
    1.15  1:      mov     (%esi),%bl
    1.16 @@ -118,7 +118,7 @@ 1:      stosl   /* low mappings cover as
    1.17          mov     $(__HYPERVISOR_CS << 16),%eax
    1.18          mov     %dx,%ax            /* selector = 0x0010 = cs */
    1.19          mov     $0x8E00,%dx        /* interrupt gate - dpl=0, present */
    1.20 -        lea     SYMBOL_NAME(idt_table)-__PAGE_OFFSET,%edi
    1.21 +        lea     idt_table-__PAGE_OFFSET,%edi
    1.22          mov     $256,%ecx
    1.23  1:      mov     %eax,(%edi)
    1.24          mov     %edx,4(%edi)
    1.25 @@ -163,38 +163,38 @@ ignore_int:
    1.26          mov     %eax,%ds
    1.27          mov     %eax,%es
    1.28          pushl   $int_msg
    1.29 -        call    SYMBOL_NAME(printf)
    1.30 +        call    printf
    1.31  1:      jmp     1b
    1.32  
    1.33  /*** STACK LOCATION ***/
    1.34          
    1.35  ENTRY(stack_start)
    1.36 -        .long SYMBOL_NAME(cpu0_stack) + STACK_SIZE - 200 - __PAGE_OFFSET
    1.37 +        .long cpu0_stack + STACK_SIZE - 200 - __PAGE_OFFSET
    1.38          .long __HYPERVISOR_DS
    1.39          
    1.40  /*** DESCRIPTOR TABLES ***/
    1.41  
    1.42 -.globl SYMBOL_NAME(idt)
    1.43 -.globl SYMBOL_NAME(gdt)        
    1.44 +.globl idt
    1.45 +.globl gdt        
    1.46  
    1.47          ALIGN
    1.48          
    1.49          .word   0    
    1.50  idt_descr:
    1.51  	.word	256*8-1
    1.52 -SYMBOL_NAME(idt):
    1.53 -        .long	SYMBOL_NAME(idt_table)
    1.54 +idt:
    1.55 +        .long	idt_table
    1.56  
    1.57          .word   0
    1.58  gdt_descr:
    1.59  	.word	(LAST_RESERVED_GDT_ENTRY*8)+7
    1.60 -SYMBOL_NAME(gdt):       
    1.61 -        .long   SYMBOL_NAME(gdt_table)	/* gdt base */
    1.62 +gdt:       
    1.63 +        .long   gdt_table	/* gdt base */
    1.64  
    1.65          .word   0
    1.66  nopaging_gdt_descr:
    1.67          .word   (LAST_RESERVED_GDT_ENTRY*8)+7
    1.68 -        .long   SYMBOL_NAME(gdt_table)-__PAGE_OFFSET
    1.69 +        .long   gdt_table-__PAGE_OFFSET
    1.70          
    1.71          ALIGN
    1.72  /* NB. Rings != 0 get access up to 0xFC400000. This allows access to the */
     2.1 --- a/xen/arch/x86/boot/x86_64.S	Thu Apr 28 21:32:24 2005 +0000
     2.2 +++ b/xen/arch/x86/boot/x86_64.S	Thu Apr 28 21:35:49 2005 +0000
     2.3 @@ -7,10 +7,10 @@
     2.4                  
     2.5          .text
     2.6          .code32
     2.7 -        
     2.8 +
     2.9  ENTRY(start)
    2.10          jmp __start
    2.11 -        
    2.12 +
    2.13          .org    0x004
    2.14  /*** MULTIBOOT HEADER ****/
    2.15          /* Magic number indicating a Multiboot header. */
    2.16 @@ -180,8 +180,8 @@ 1:      movq    %rax,(%rdi)
    2.17                          
    2.18  /*** DESCRIPTOR TABLES ***/
    2.19  
    2.20 -.globl SYMBOL_NAME(idt)
    2.21 -.globl SYMBOL_NAME(gdt)        
    2.22 +.globl idt
    2.23 +.globl gdt        
    2.24  
    2.25          .org    0x1f0
    2.26          .word   (LAST_RESERVED_GDT_ENTRY*8)+7
    2.27 @@ -203,17 +203,17 @@ ENTRY(gdt_table)
    2.28          .word   0
    2.29  gdt_descr:
    2.30          .word   (LAST_RESERVED_GDT_ENTRY*8)+7
    2.31 -SYMBOL_NAME(gdt):       
    2.32 -        .quad   SYMBOL_NAME(gdt_table)
    2.33 +gdt:       
    2.34 +        .quad   gdt_table
    2.35  
    2.36          .word   0    
    2.37  idt_descr:
    2.38          .word   256*16-1
    2.39 -SYMBOL_NAME(idt):
    2.40 -        .quad   SYMBOL_NAME(idt_table)
    2.41 +idt:
    2.42 +        .quad   idt_table
    2.43  
    2.44  ENTRY(stack_start)
    2.45 -        .quad   SYMBOL_NAME(cpu0_stack) + STACK_SIZE - 200
    2.46 +        .quad   cpu0_stack + STACK_SIZE - 200
    2.47  
    2.48  high_start:
    2.49          .quad   __high_start
    2.50 @@ -258,6 +258,6 @@ int_msg:
    2.51  ignore_int:
    2.52          cld
    2.53          leaq    int_msg(%rip),%rdi
    2.54 -        call    SYMBOL_NAME(printf)
    2.55 +        call    printf
    2.56  1:      jmp     1b
    2.57  
     3.1 --- a/xen/arch/x86/trampoline.S	Thu Apr 28 21:32:24 2005 +0000
     3.2 +++ b/xen/arch/x86/trampoline.S	Thu Apr 28 21:35:49 2005 +0000
     3.3 @@ -60,8 +60,7 @@ gdt_48:
     3.4  #else
     3.5  	.long   0x100200 # gdt_table
     3.6  #endif
     3.7 -        
     3.8 -.globl SYMBOL_NAME(trampoline_end)
     3.9 -SYMBOL_NAME_LABEL(trampoline_end)
    3.10 +
    3.11 +ENTRY(trampoline_end)
    3.12  
    3.13  #endif /* CONFIG_SMP */
     4.1 --- a/xen/arch/x86/x86_32/entry.S	Thu Apr 28 21:32:24 2005 +0000
     4.2 +++ b/xen/arch/x86/x86_32/entry.S	Thu Apr 28 21:35:49 2005 +0000
     4.3 @@ -111,7 +111,7 @@
     4.4  ENTRY(vmx_asm_vmexit_handler)
     4.5          /* selectors are restored/saved by VMX */
     4.6          VMX_SAVE_ALL_NOSEGREGS
     4.7 -        call SYMBOL_NAME(vmx_vmexit_handler)
     4.8 +        call vmx_vmexit_handler
     4.9          jmp vmx_asm_do_resume
    4.10  
    4.11  ENTRY(vmx_asm_do_launch)
    4.12 @@ -126,7 +126,7 @@ ENTRY(vmx_asm_do_launch)
    4.13          /* VMLUANCH */
    4.14          .byte 0x0f,0x01,0xc2
    4.15          pushf
    4.16 -        call SYMBOL_NAME(vm_launch_fail)
    4.17 +        call vm_launch_fail
    4.18          hlt
    4.19          
    4.20          ALIGN
    4.21 @@ -141,11 +141,11 @@ vmx_test_all_events:
    4.22  /*test_softirqs:*/  
    4.23          movl EDOMAIN_processor(%ebx),%eax
    4.24          shl  $IRQSTAT_shift,%eax
    4.25 -        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
    4.26 +        test %ecx,irq_stat(%eax,1)
    4.27          jnz  vmx_process_softirqs
    4.28  
    4.29  vmx_restore_all_guest:
    4.30 -        call SYMBOL_NAME(load_cr2)
    4.31 +        call load_cr2
    4.32          /* 
    4.33           * Check if we are going back to VMX-based VM
    4.34           * By this time, all the setups in the VMCS must be complete.
    4.35 @@ -161,14 +161,14 @@ vmx_restore_all_guest:
    4.36          /* VMRESUME */
    4.37          .byte 0x0f,0x01,0xc3
    4.38          pushf
    4.39 -        call SYMBOL_NAME(vm_resume_fail)
    4.40 +        call vm_resume_fail
    4.41          /* Should never reach here */
    4.42          hlt
    4.43  
    4.44          ALIGN
    4.45  vmx_process_softirqs:
    4.46          sti       
    4.47 -        call SYMBOL_NAME(do_softirq)
    4.48 +        call do_softirq
    4.49          jmp  vmx_test_all_events
    4.50  #endif
    4.51  
    4.52 @@ -261,7 +261,7 @@ ENTRY(hypercall)
    4.53          GET_CURRENT(%ebx)
    4.54          andl $(NR_hypercalls-1),%eax
    4.55          PERFC_INCR(PERFC_hypercalls, %eax)
    4.56 -        call *SYMBOL_NAME(hypercall_table)(,%eax,4)
    4.57 +        call *hypercall_table(,%eax,4)
    4.58          movl %eax,UREGS_eax(%esp)       # save the return value
    4.59  
    4.60  test_all_events:
    4.61 @@ -271,7 +271,7 @@ test_all_events:
    4.62  /*test_softirqs:*/  
    4.63          movl EDOMAIN_processor(%ebx),%eax
    4.64          shl  $IRQSTAT_shift,%eax
    4.65 -        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
    4.66 +        test %ecx,irq_stat(%eax,1)
    4.67          jnz  process_softirqs
    4.68  /*test_guest_events:*/
    4.69          movl EDOMAIN_vcpu_info(%ebx),%eax
    4.70 @@ -295,7 +295,7 @@ test_all_events:
    4.71          ALIGN
    4.72  process_softirqs:
    4.73          sti       
    4.74 -        call SYMBOL_NAME(do_softirq)
    4.75 +        call do_softirq
    4.76          jmp  test_all_events
    4.77                  
    4.78  /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
    4.79 @@ -431,7 +431,7 @@ error_code:
    4.80  	pushl %edx			# push the cpu_user_regs pointer
    4.81  	GET_CURRENT(%ebx)
    4.82          PERFC_INCR(PERFC_exceptions, %eax)
    4.83 -	call  *SYMBOL_NAME(exception_table)(,%eax,4)
    4.84 +	call  *exception_table(,%eax,4)
    4.85          addl  $4,%esp
    4.86          movl  UREGS_eflags(%esp),%eax
    4.87          movb  UREGS_cs(%esp),%al
    4.88 @@ -465,7 +465,7 @@ FATAL_exception_with_ints_disabled:
    4.89          movl  %esp,%edx
    4.90  	pushl %edx			# push the cpu_user_regs pointer
    4.91          pushl %esi                      # push the trapnr (entry vector)
    4.92 -        call  SYMBOL_NAME(fatal_trap)
    4.93 +        call  fatal_trap
    4.94          ud2
    4.95                                          
    4.96  ENTRY(coprocessor_error)
    4.97 @@ -575,7 +575,7 @@ do_watchdog_tick:
    4.98          movl  %esp,%edx
    4.99          pushl %ebx   # reason
   4.100          pushl %edx   # regs
   4.101 -        call  SYMBOL_NAME(do_nmi)
   4.102 +        call  do_nmi
   4.103          addl  $8,%esp
   4.104          jmp   ret_from_intr
   4.105  
   4.106 @@ -595,18 +595,18 @@ nmi_parity_err:
   4.107          andb $0xf,%al
   4.108          orb  $0x4,%al
   4.109          outb %al,$0x61
   4.110 -        cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
   4.111 +        cmpb $'i',%ss:opt_nmi # nmi=ignore
   4.112          je   nmi_out
   4.113 -        bts  $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
   4.114 -        bts  $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
   4.115 -        cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
   4.116 +        bts  $0,%ss:nmi_softirq_reason
   4.117 +        bts  $NMI_SOFTIRQ,%ss:irq_stat
   4.118 +        cmpb $'d',%ss:opt_nmi # nmi=dom0
   4.119          je   nmi_out
   4.120          movl $(__HYPERVISOR_DS),%edx       # nmi=fatal
   4.121          movl %edx,%ds
   4.122          movl %edx,%es
   4.123          movl %esp,%edx
   4.124          push %edx
   4.125 -        call SYMBOL_NAME(mem_parity_error)
   4.126 +        call mem_parity_error
   4.127          addl $4,%esp
   4.128  nmi_out:movl  %ss:UREGS_eflags(%esp),%eax
   4.129          movb  %ss:UREGS_cs(%esp),%al
   4.130 @@ -623,18 +623,18 @@ nmi_io_err:
   4.131          andb $0xf,%al
   4.132          orb  $0x8,%al
   4.133          outb %al,$0x61
   4.134 -        cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
   4.135 +        cmpb $'i',%ss:opt_nmi # nmi=ignore
   4.136          je   nmi_out
   4.137 -        bts  $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
   4.138 -        bts  $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
   4.139 -        cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
   4.140 +        bts  $1,%ss:nmi_softirq_reason
   4.141 +        bts  $NMI_SOFTIRQ,%ss:irq_stat
   4.142 +        cmpb $'d',%ss:opt_nmi # nmi=dom0
   4.143          je   nmi_out
   4.144          movl $(__HYPERVISOR_DS),%edx       # nmi=fatal
   4.145          movl %edx,%ds
   4.146          movl %edx,%es
   4.147          movl %esp,%edx
   4.148          push %edx
   4.149 -        call SYMBOL_NAME(io_check_error)                        
   4.150 +        call io_check_error                        
   4.151          addl $4,%esp
   4.152          jmp  nmi_out
   4.153  
   4.154 @@ -656,7 +656,7 @@ do_arch_sched_op:
   4.155          # Ensure we return success even if we return via schedule_tail()
   4.156          xorl %eax,%eax
   4.157          movl %eax,UREGS_eax+4(%esp)
   4.158 -        jmp  SYMBOL_NAME(do_sched_op)
   4.159 +        jmp  do_sched_op
   4.160  
   4.161  do_switch_vm86:
   4.162          # Discard the return address
   4.163 @@ -696,55 +696,55 @@ VFLT3:  movl %gs:(%esi),%eax
   4.164  .data
   4.165  
   4.166  ENTRY(exception_table)
   4.167 -        .long SYMBOL_NAME(do_divide_error)
   4.168 -        .long SYMBOL_NAME(do_debug)
   4.169 +        .long do_divide_error
   4.170 +        .long do_debug
   4.171          .long 0 # nmi
   4.172 -        .long SYMBOL_NAME(do_int3)
   4.173 -        .long SYMBOL_NAME(do_overflow)
   4.174 -        .long SYMBOL_NAME(do_bounds)
   4.175 -        .long SYMBOL_NAME(do_invalid_op)
   4.176 -        .long SYMBOL_NAME(math_state_restore)
   4.177 +        .long do_int3
   4.178 +        .long do_overflow
   4.179 +        .long do_bounds
   4.180 +        .long do_invalid_op
   4.181 +        .long math_state_restore
   4.182          .long 0 # double fault
   4.183 -        .long SYMBOL_NAME(do_coprocessor_segment_overrun)
   4.184 -        .long SYMBOL_NAME(do_invalid_TSS)
   4.185 -        .long SYMBOL_NAME(do_segment_not_present)
   4.186 -        .long SYMBOL_NAME(do_stack_segment)
   4.187 -        .long SYMBOL_NAME(do_general_protection)
   4.188 -        .long SYMBOL_NAME(do_page_fault)
   4.189 -        .long SYMBOL_NAME(do_spurious_interrupt_bug)
   4.190 -        .long SYMBOL_NAME(do_coprocessor_error)
   4.191 -        .long SYMBOL_NAME(do_alignment_check)
   4.192 -        .long SYMBOL_NAME(do_machine_check)
   4.193 -        .long SYMBOL_NAME(do_simd_coprocessor_error)
   4.194 +        .long do_coprocessor_segment_overrun
   4.195 +        .long do_invalid_TSS
   4.196 +        .long do_segment_not_present
   4.197 +        .long do_stack_segment
   4.198 +        .long do_general_protection
   4.199 +        .long do_page_fault
   4.200 +        .long do_spurious_interrupt_bug
   4.201 +        .long do_coprocessor_error
   4.202 +        .long do_alignment_check
   4.203 +        .long do_machine_check
   4.204 +        .long do_simd_coprocessor_error
   4.205  
   4.206  ENTRY(hypercall_table)
   4.207 -        .long SYMBOL_NAME(do_set_trap_table)     /*  0 */
   4.208 -        .long SYMBOL_NAME(do_mmu_update)
   4.209 -        .long SYMBOL_NAME(do_set_gdt)
   4.210 -        .long SYMBOL_NAME(do_stack_switch)
   4.211 -        .long SYMBOL_NAME(do_set_callbacks)
   4.212 -        .long SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
   4.213 -        .long SYMBOL_NAME(do_arch_sched_op)
   4.214 -        .long SYMBOL_NAME(do_dom0_op)
   4.215 -        .long SYMBOL_NAME(do_set_debugreg)
   4.216 -        .long SYMBOL_NAME(do_get_debugreg)
   4.217 -        .long SYMBOL_NAME(do_update_descriptor)  /* 10 */
   4.218 -        .long SYMBOL_NAME(do_set_fast_trap)
   4.219 -        .long SYMBOL_NAME(do_dom_mem_op)
   4.220 -        .long SYMBOL_NAME(do_multicall)
   4.221 -        .long SYMBOL_NAME(do_update_va_mapping)
   4.222 -        .long SYMBOL_NAME(do_set_timer_op)       /* 15 */
   4.223 -        .long SYMBOL_NAME(do_event_channel_op)
   4.224 -        .long SYMBOL_NAME(do_xen_version)
   4.225 -        .long SYMBOL_NAME(do_console_io)
   4.226 -        .long SYMBOL_NAME(do_physdev_op)
   4.227 -        .long SYMBOL_NAME(do_grant_table_op)     /* 20 */
   4.228 -        .long SYMBOL_NAME(do_vm_assist)
   4.229 -        .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
   4.230 -        .long SYMBOL_NAME(do_switch_vm86)
   4.231 -        .long SYMBOL_NAME(do_boot_vcpu)
   4.232 -        .long SYMBOL_NAME(do_ni_hypercall)       /* 25 */
   4.233 -        .long SYMBOL_NAME(do_mmuext_op)
   4.234 +        .long do_set_trap_table     /*  0 */
   4.235 +        .long do_mmu_update
   4.236 +        .long do_set_gdt
   4.237 +        .long do_stack_switch
   4.238 +        .long do_set_callbacks
   4.239 +        .long do_fpu_taskswitch     /*  5 */
   4.240 +        .long do_arch_sched_op
   4.241 +        .long do_dom0_op
   4.242 +        .long do_set_debugreg
   4.243 +        .long do_get_debugreg
   4.244 +        .long do_update_descriptor  /* 10 */
   4.245 +        .long do_set_fast_trap
   4.246 +        .long do_dom_mem_op
   4.247 +        .long do_multicall
   4.248 +        .long do_update_va_mapping
   4.249 +        .long do_set_timer_op       /* 15 */
   4.250 +        .long do_event_channel_op
   4.251 +        .long do_xen_version
   4.252 +        .long do_console_io
   4.253 +        .long do_physdev_op
   4.254 +        .long do_grant_table_op     /* 20 */
   4.255 +        .long do_vm_assist
   4.256 +        .long do_update_va_mapping_otherdomain
   4.257 +        .long do_switch_vm86
   4.258 +        .long do_boot_vcpu
   4.259 +        .long do_ni_hypercall       /* 25 */
   4.260 +        .long do_mmuext_op
   4.261          .rept NR_hypercalls-((.-hypercall_table)/4)
   4.262 -        .long SYMBOL_NAME(do_ni_hypercall)
   4.263 +        .long do_ni_hypercall
   4.264          .endr
     5.1 --- a/xen/arch/x86/x86_64/entry.S	Thu Apr 28 21:32:24 2005 +0000
     5.2 +++ b/xen/arch/x86/x86_64/entry.S	Thu Apr 28 21:35:49 2005 +0000
     5.3 @@ -120,7 +120,7 @@ ENTRY(syscall_enter)
     5.4          sti
     5.5          movq  %r10,%rcx
     5.6          andq  $(NR_hypercalls-1),%rax
     5.7 -        leaq  SYMBOL_NAME(hypercall_table)(%rip),%r10
     5.8 +        leaq  hypercall_table(%rip),%r10
     5.9          PERFC_INCR(PERFC_hypercalls, %rax)
    5.10          callq *(%r10,%rax,8)
    5.11          movq %rax,UREGS_rax(%rsp)       # save the return value
    5.12 @@ -131,7 +131,7 @@ test_all_events:
    5.13  /*test_softirqs:*/  
    5.14          movl  EDOMAIN_processor(%rbx),%eax
    5.15          shl   $IRQSTAT_shift,%rax
    5.16 -        leaq  SYMBOL_NAME(irq_stat)(%rip),%rcx
    5.17 +        leaq  irq_stat(%rip),%rcx
    5.18          testl $~0,(%rcx,%rax,1)
    5.19          jnz   process_softirqs
    5.20  /*test_guest_events:*/
    5.21 @@ -199,7 +199,7 @@ test_all_events:
    5.22  ENTRY(vmx_asm_vmexit_handler)
    5.23          /* selectors are restored/saved by VMX */
    5.24          VMX_SAVE_ALL_NOSEGREGS
    5.25 -        call SYMBOL_NAME(vmx_vmexit_handler)
    5.26 +        call vmx_vmexit_handler
    5.27          jmp vmx_asm_do_resume
    5.28  
    5.29  ENTRY(vmx_asm_do_launch)
    5.30 @@ -222,7 +222,7 @@ ENTRY(vmx_asm_do_launch)
    5.31          /* VMLUANCH */
    5.32          .byte 0x0f,0x01,0xc2
    5.33          pushfq
    5.34 -        call SYMBOL_NAME(vm_launch_fail)
    5.35 +        call vm_launch_fail
    5.36          hlt
    5.37          
    5.38          ALIGN
    5.39 @@ -235,12 +235,12 @@ vmx_test_all_events:
    5.40  /*test_softirqs:*/  
    5.41          movl  EDOMAIN_processor(%rbx),%eax
    5.42          shl   $IRQSTAT_shift,%rax
    5.43 -        leaq  SYMBOL_NAME(irq_stat)(%rip), %rdx
    5.44 +        leaq  irq_stat(%rip), %rdx
    5.45          testl $~0,(%rdx,%rax,1)
    5.46          jnz   vmx_process_softirqs
    5.47  
    5.48  vmx_restore_all_guest:
    5.49 -        call SYMBOL_NAME(load_cr2)
    5.50 +        call load_cr2
    5.51          /* 
    5.52           * Check if we are going back to VMX-based VM
    5.53           * By this time, all the setups in the VMCS must be complete.
    5.54 @@ -264,14 +264,14 @@ vmx_restore_all_guest:
    5.55          /* VMRESUME */
    5.56          .byte 0x0f,0x01,0xc3
    5.57          pushfq
    5.58 -        call SYMBOL_NAME(vm_resume_fail)
    5.59 +        call vm_resume_fail
    5.60          /* Should never reach here */
    5.61          hlt
    5.62  
    5.63          ALIGN
    5.64  vmx_process_softirqs:
    5.65          sti       
    5.66 -        call SYMBOL_NAME(do_softirq)
    5.67 +        call do_softirq
    5.68          jmp  vmx_test_all_events
    5.69  #endif
    5.70  
    5.71 @@ -279,7 +279,7 @@ vmx_process_softirqs:
    5.72  /* %rbx: struct exec_domain */
    5.73  process_softirqs:
    5.74          sti       
    5.75 -        call SYMBOL_NAME(do_softirq)
    5.76 +        call do_softirq
    5.77          jmp  test_all_events
    5.78  
    5.79  /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK:                     */
    5.80 @@ -292,7 +292,7 @@ create_bounce_frame:
    5.81          /* Push new frame at registered guest-OS stack base. */
    5.82          pushq %rdx
    5.83          movq  %rbx,%rdi
    5.84 -        call  SYMBOL_NAME(toggle_guest_mode)
    5.85 +        call  toggle_guest_mode
    5.86          popq  %rdx
    5.87          movq  EDOMAIN_kernel_sp(%rbx),%rsi
    5.88          jmp   2f
    5.89 @@ -390,7 +390,7 @@ error_code:
    5.90          sti
    5.91          movq  %rsp,%rdi
    5.92          movl  UREGS_entry_vector(%rsp),%eax
    5.93 -        leaq  SYMBOL_NAME(exception_table)(%rip),%rdx
    5.94 +        leaq  exception_table(%rip),%rdx
    5.95          GET_CURRENT(%rbx)
    5.96          PERFC_INCR(PERFC_exceptions, %rax)
    5.97          callq *(%rdx,%rax,8)
    5.98 @@ -423,7 +423,7 @@ 1:      movq  UREGS_error_code(%rsp),%ra
    5.99  FATAL_exception_with_ints_disabled:
   5.100          movl  UREGS_entry_vector(%rsp),%edi
   5.101          movq  %rsp,%rsi
   5.102 -        call  SYMBOL_NAME(fatal_trap)
   5.103 +        call  fatal_trap
   5.104          ud2
   5.105  
   5.106  ENTRY(divide_error)
   5.107 @@ -520,67 +520,67 @@ ENTRY(nmi)
   5.108          inb   $0x61,%al
   5.109          movl  %eax,%esi # reason
   5.110          movq  %rsp,%rdi # regs
   5.111 -        call  SYMBOL_NAME(do_nmi)
   5.112 +        call  do_nmi
   5.113  	jmp   restore_all_xen
   5.114  
   5.115  do_arch_sched_op:
   5.116          # Ensure we return success even if we return via schedule_tail()
   5.117          xorl  %eax,%eax
   5.118          movq  %rax,UREGS_rax+8(%rsp)
   5.119 -        jmp   SYMBOL_NAME(do_sched_op)
   5.120 +        jmp   do_sched_op
   5.121  
   5.122  .data
   5.123  
   5.124  ENTRY(exception_table)
   5.125 -        .quad SYMBOL_NAME(do_divide_error)
   5.126 -        .quad SYMBOL_NAME(do_debug)
   5.127 +        .quad do_divide_error
   5.128 +        .quad do_debug
   5.129          .quad 0 # nmi
   5.130 -        .quad SYMBOL_NAME(do_int3)
   5.131 -        .quad SYMBOL_NAME(do_overflow)
   5.132 -        .quad SYMBOL_NAME(do_bounds)
   5.133 -        .quad SYMBOL_NAME(do_invalid_op)
   5.134 -        .quad SYMBOL_NAME(math_state_restore)
   5.135 -        .quad SYMBOL_NAME(do_double_fault)
   5.136 -        .quad SYMBOL_NAME(do_coprocessor_segment_overrun)
   5.137 -        .quad SYMBOL_NAME(do_invalid_TSS)
   5.138 -        .quad SYMBOL_NAME(do_segment_not_present)
   5.139 -        .quad SYMBOL_NAME(do_stack_segment)
   5.140 -        .quad SYMBOL_NAME(do_general_protection)
   5.141 -        .quad SYMBOL_NAME(do_page_fault)
   5.142 -        .quad SYMBOL_NAME(do_spurious_interrupt_bug)
   5.143 -        .quad SYMBOL_NAME(do_coprocessor_error)
   5.144 -        .quad SYMBOL_NAME(do_alignment_check)
   5.145 -        .quad SYMBOL_NAME(do_machine_check)
   5.146 -        .quad SYMBOL_NAME(do_simd_coprocessor_error)
   5.147 +        .quad do_int3
   5.148 +        .quad do_overflow
   5.149 +        .quad do_bounds
   5.150 +        .quad do_invalid_op
   5.151 +        .quad math_state_restore
   5.152 +        .quad do_double_fault
   5.153 +        .quad do_coprocessor_segment_overrun
   5.154 +        .quad do_invalid_TSS
   5.155 +        .quad do_segment_not_present
   5.156 +        .quad do_stack_segment
   5.157 +        .quad do_general_protection
   5.158 +        .quad do_page_fault
   5.159 +        .quad do_spurious_interrupt_bug
   5.160 +        .quad do_coprocessor_error
   5.161 +        .quad do_alignment_check
   5.162 +        .quad do_machine_check
   5.163 +        .quad do_simd_coprocessor_error
   5.164  
   5.165  ENTRY(hypercall_table)
   5.166 -        .quad SYMBOL_NAME(do_set_trap_table)     /*  0 */
   5.167 -        .quad SYMBOL_NAME(do_mmu_update)
   5.168 -        .quad SYMBOL_NAME(do_set_gdt)
   5.169 -        .quad SYMBOL_NAME(do_stack_switch)
   5.170 -        .quad SYMBOL_NAME(do_set_callbacks)
   5.171 -        .quad SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
   5.172 -        .quad SYMBOL_NAME(do_arch_sched_op)
   5.173 -        .quad SYMBOL_NAME(do_dom0_op)
   5.174 -        .quad SYMBOL_NAME(do_set_debugreg)
   5.175 -        .quad SYMBOL_NAME(do_get_debugreg)
   5.176 -        .quad SYMBOL_NAME(do_update_descriptor)  /* 10 */
   5.177 -        .quad SYMBOL_NAME(do_ni_hypercall)
   5.178 -        .quad SYMBOL_NAME(do_dom_mem_op)
   5.179 -        .quad SYMBOL_NAME(do_multicall)
   5.180 -        .quad SYMBOL_NAME(do_update_va_mapping)
   5.181 -        .quad SYMBOL_NAME(do_set_timer_op)       /* 15 */
   5.182 -        .quad SYMBOL_NAME(do_event_channel_op)
   5.183 -        .quad SYMBOL_NAME(do_xen_version)
   5.184 -        .quad SYMBOL_NAME(do_console_io)
   5.185 -        .quad SYMBOL_NAME(do_physdev_op)
   5.186 -        .quad SYMBOL_NAME(do_grant_table_op)     /* 20 */
   5.187 -        .quad SYMBOL_NAME(do_vm_assist)
   5.188 -        .quad SYMBOL_NAME(do_update_va_mapping_otherdomain)
   5.189 -        .quad SYMBOL_NAME(do_switch_to_user)
   5.190 -        .quad SYMBOL_NAME(do_boot_vcpu)
   5.191 -        .quad SYMBOL_NAME(do_set_segment_base)   /* 25 */
   5.192 -        .quad SYMBOL_NAME(do_mmuext_op)
   5.193 +        .quad do_set_trap_table     /*  0 */
   5.194 +        .quad do_mmu_update
   5.195 +        .quad do_set_gdt
   5.196 +        .quad do_stack_switch
   5.197 +        .quad do_set_callbacks
   5.198 +        .quad do_fpu_taskswitch     /*  5 */
   5.199 +        .quad do_arch_sched_op
   5.200 +        .quad do_dom0_op
   5.201 +        .quad do_set_debugreg
   5.202 +        .quad do_get_debugreg
   5.203 +        .quad do_update_descriptor  /* 10 */
   5.204 +        .quad do_ni_hypercall
   5.205 +        .quad do_dom_mem_op
   5.206 +        .quad do_multicall
   5.207 +        .quad do_update_va_mapping
   5.208 +        .quad do_set_timer_op       /* 15 */
   5.209 +        .quad do_event_channel_op
   5.210 +        .quad do_xen_version
   5.211 +        .quad do_console_io
   5.212 +        .quad do_physdev_op
   5.213 +        .quad do_grant_table_op     /* 20 */
   5.214 +        .quad do_vm_assist
   5.215 +        .quad do_update_va_mapping_otherdomain
   5.216 +        .quad do_switch_to_user
   5.217 +        .quad do_boot_vcpu
   5.218 +        .quad do_set_segment_base   /* 25 */
   5.219 +        .quad do_mmuext_op
   5.220          .rept NR_hypercalls-((.-hypercall_table)/4)
   5.221 -        .quad SYMBOL_NAME(do_ni_hypercall)
   5.222 +        .quad do_ni_hypercall
   5.223          .endr
     6.1 --- a/xen/include/asm-x86/config.h	Thu Apr 28 21:32:24 2005 +0000
     6.2 +++ b/xen/include/asm-x86/config.h	Thu Apr 28 21:35:49 2005 +0000
     6.3 @@ -64,16 +64,13 @@
     6.4  /* Linkage for x86 */
     6.5  #define __ALIGN .align 16,0x90
     6.6  #define __ALIGN_STR ".align 16,0x90"
     6.7 -#define SYMBOL_NAME_STR(X) #X
     6.8 -#define SYMBOL_NAME(X) X
     6.9 -#define SYMBOL_NAME_LABEL(X) X##:
    6.10  #ifdef __ASSEMBLY__
    6.11  #define ALIGN __ALIGN
    6.12  #define ALIGN_STR __ALIGN_STR
    6.13 -#define ENTRY(name) \
    6.14 -  .globl SYMBOL_NAME(name); \
    6.15 -  ALIGN; \
    6.16 -  SYMBOL_NAME_LABEL(name)
    6.17 +#define ENTRY(name)                             \
    6.18 +  .globl name;                                  \
    6.19 +  ALIGN;                                        \
    6.20 +  name:
    6.21  #endif
    6.22  
    6.23  #define barrier() __asm__ __volatile__("": : :"memory")
     7.1 --- a/xen/include/asm-x86/x86_32/asm_defns.h	Thu Apr 28 21:32:24 2005 +0000
     7.2 +++ b/xen/include/asm-x86/x86_32/asm_defns.h	Thu Apr 28 21:35:49 2005 +0000
     7.3 @@ -4,81 +4,81 @@
     7.4  /* Maybe auto-generate the following two cases (quoted vs. unquoted). */
     7.5  #ifndef __ASSEMBLY__
     7.6  
     7.7 -#define __SAVE_ALL_PRE \
     7.8 -        "cld;" \
     7.9 -        "pushl %eax;" \
    7.10 -        "pushl %ebp;" \
    7.11 -        "pushl %edi;" \
    7.12 -        "pushl %esi;" \
    7.13 -        "pushl %edx;" \
    7.14 -        "pushl %ecx;" \
    7.15 -        "pushl %ebx;" \
    7.16 -        "testl $"STR(X86_EFLAGS_VM)","STR(UREGS_eflags)"(%esp);" \
    7.17 -        "jz 2f;" \
    7.18 -        "call setup_vm86_frame;" \
    7.19 -        "jmp 3f;" \
    7.20 -        "2:testb $3,"STR(UREGS_cs)"(%esp);" \
    7.21 -        "jz 1f;" \
    7.22 -        "movl %ds,"STR(UREGS_ds)"(%esp);" \
    7.23 -        "movl %es,"STR(UREGS_es)"(%esp);" \
    7.24 -        "movl %fs,"STR(UREGS_fs)"(%esp);" \
    7.25 -        "movl %gs,"STR(UREGS_gs)"(%esp);" \
    7.26 +#define __SAVE_ALL_PRE                                                  \
    7.27 +        "cld;"                                                          \
    7.28 +        "pushl %eax;"                                                   \
    7.29 +        "pushl %ebp;"                                                   \
    7.30 +        "pushl %edi;"                                                   \
    7.31 +        "pushl %esi;"                                                   \
    7.32 +        "pushl %edx;"                                                   \
    7.33 +        "pushl %ecx;"                                                   \
    7.34 +        "pushl %ebx;"                                                   \
    7.35 +        "testl $"STR(X86_EFLAGS_VM)","STR(UREGS_eflags)"(%esp);"        \
    7.36 +        "jz 2f;"                                                        \
    7.37 +        "call setup_vm86_frame;"                                        \
    7.38 +        "jmp 3f;"                                                       \
    7.39 +        "2:testb $3,"STR(UREGS_cs)"(%esp);"                             \
    7.40 +        "jz 1f;"                                                        \
    7.41 +        "movl %ds,"STR(UREGS_ds)"(%esp);"                               \
    7.42 +        "movl %es,"STR(UREGS_es)"(%esp);"                               \
    7.43 +        "movl %fs,"STR(UREGS_fs)"(%esp);"                               \
    7.44 +        "movl %gs,"STR(UREGS_gs)"(%esp);"                               \
    7.45          "3:"
    7.46  
    7.47 -#define SAVE_ALL_NOSEGREGS(_reg) \
    7.48 -        __SAVE_ALL_PRE \
    7.49 +#define SAVE_ALL_NOSEGREGS(_reg)                \
    7.50 +        __SAVE_ALL_PRE                          \
    7.51          "1:"
    7.52  
    7.53 -#define SET_XEN_SEGMENTS(_reg) \
    7.54 -        "movl $("STR(__HYPERVISOR_DS)"),%e"STR(_reg)"x;" \
    7.55 -        "movl %e"STR(_reg)"x,%ds;" \
    7.56 +#define SET_XEN_SEGMENTS(_reg)                                  \
    7.57 +        "movl $("STR(__HYPERVISOR_DS)"),%e"STR(_reg)"x;"        \
    7.58 +        "movl %e"STR(_reg)"x,%ds;"                              \
    7.59          "movl %e"STR(_reg)"x,%es;"
    7.60  
    7.61 -#define SAVE_ALL(_reg) \
    7.62 -        __SAVE_ALL_PRE \
    7.63 -        SET_XEN_SEGMENTS(_reg) \
    7.64 +#define SAVE_ALL(_reg)                          \
    7.65 +        __SAVE_ALL_PRE                          \
    7.66 +        SET_XEN_SEGMENTS(_reg)                  \
    7.67          "1:"
    7.68  
    7.69  #else
    7.70  
    7.71 -#define __SAVE_ALL_PRE \
    7.72 -        cld; \
    7.73 -        pushl %eax; \
    7.74 -        pushl %ebp; \
    7.75 -        pushl %edi; \
    7.76 -        pushl %esi; \
    7.77 -        pushl %edx; \
    7.78 -        pushl %ecx; \
    7.79 -        pushl %ebx; \
    7.80 -        testl $X86_EFLAGS_VM,UREGS_eflags(%esp); \
    7.81 -        jz 2f; \
    7.82 -        call setup_vm86_frame; \
    7.83 -        jmp 3f; \
    7.84 -        2:testb $3,UREGS_cs(%esp); \
    7.85 -        jz 1f; \
    7.86 -        movl %ds,UREGS_ds(%esp); \
    7.87 -        movl %es,UREGS_es(%esp); \
    7.88 -        movl %fs,UREGS_fs(%esp); \
    7.89 -        movl %gs,UREGS_gs(%esp); \
    7.90 +#define __SAVE_ALL_PRE                                  \
    7.91 +        cld;                                            \
    7.92 +        pushl %eax;                                     \
    7.93 +        pushl %ebp;                                     \
    7.94 +        pushl %edi;                                     \
    7.95 +        pushl %esi;                                     \
    7.96 +        pushl %edx;                                     \
    7.97 +        pushl %ecx;                                     \
    7.98 +        pushl %ebx;                                     \
    7.99 +        testl $X86_EFLAGS_VM,UREGS_eflags(%esp);        \
   7.100 +        jz 2f;                                          \
   7.101 +        call setup_vm86_frame;                          \
   7.102 +        jmp 3f;                                         \
   7.103 +        2:testb $3,UREGS_cs(%esp);                      \
   7.104 +        jz 1f;                                          \
   7.105 +        movl %ds,UREGS_ds(%esp);                        \
   7.106 +        movl %es,UREGS_es(%esp);                        \
   7.107 +        movl %fs,UREGS_fs(%esp);                        \
   7.108 +        movl %gs,UREGS_gs(%esp);                        \
   7.109          3:
   7.110  
   7.111 -#define SAVE_ALL_NOSEGREGS(_reg) \
   7.112 -        __SAVE_ALL_PRE \
   7.113 +#define SAVE_ALL_NOSEGREGS(_reg)                \
   7.114 +        __SAVE_ALL_PRE                          \
   7.115          1:
   7.116  
   7.117 -#define SET_XEN_SEGMENTS(_reg) \
   7.118 -        movl $(__HYPERVISOR_DS),%e ## _reg ## x; \
   7.119 -        movl %e ## _reg ## x,%ds; \
   7.120 +#define SET_XEN_SEGMENTS(_reg)                          \
   7.121 +        movl $(__HYPERVISOR_DS),%e ## _reg ## x;        \
   7.122 +        movl %e ## _reg ## x,%ds;                       \
   7.123          movl %e ## _reg ## x,%es;
   7.124  
   7.125 -#define SAVE_ALL(_reg) \
   7.126 -        __SAVE_ALL_PRE \
   7.127 -        SET_XEN_SEGMENTS(_reg) \
   7.128 +#define SAVE_ALL(_reg)                          \
   7.129 +        __SAVE_ALL_PRE                          \
   7.130 +        SET_XEN_SEGMENTS(_reg)                  \
   7.131          1:
   7.132  
   7.133  #ifdef PERF_COUNTERS
   7.134 -#define PERFC_INCR(_name,_idx) \
   7.135 -    lock incl SYMBOL_NAME(perfcounters)+_name(,_idx,4)
   7.136 +#define PERFC_INCR(_name,_idx)                          \
   7.137 +    lock incl perfcounters+_name(,_idx,4)
   7.138  #else
   7.139  #define PERFC_INCR(_name,_idx)
   7.140  #endif
   7.141 @@ -86,50 +86,50 @@
   7.142  #endif
   7.143  
   7.144  #define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
   7.145 -#define XBUILD_SMP_INTERRUPT(x,v)\
   7.146 -asmlinkage void x(void); \
   7.147 -__asm__( \
   7.148 -    "\n"__ALIGN_STR"\n" \
   7.149 -    SYMBOL_NAME_STR(x) ":\n\t" \
   7.150 -    "pushl $"#v"<<16\n\t" \
   7.151 -    SAVE_ALL(a) \
   7.152 -    "call "SYMBOL_NAME_STR(smp_##x)"\n\t" \
   7.153 +#define XBUILD_SMP_INTERRUPT(x,v)               \
   7.154 +asmlinkage void x(void);                        \
   7.155 +__asm__(                                        \
   7.156 +    "\n"__ALIGN_STR"\n"                         \
   7.157 +    STR(x) ":\n\t"                              \
   7.158 +    "pushl $"#v"<<16\n\t"                       \
   7.159 +    SAVE_ALL(a)                                 \
   7.160 +    "call "STR(smp_##x)"\n\t"                   \
   7.161      "jmp ret_from_intr\n");
   7.162  
   7.163  #define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v)
   7.164 -#define XBUILD_SMP_TIMER_INTERRUPT(x,v) \
   7.165 +#define XBUILD_SMP_TIMER_INTERRUPT(x,v)         \
   7.166  asmlinkage void x(struct cpu_user_regs * regs); \
   7.167 -__asm__( \
   7.168 -"\n"__ALIGN_STR"\n" \
   7.169 -SYMBOL_NAME_STR(x) ":\n\t" \
   7.170 -    "pushl $"#v"<<16\n\t" \
   7.171 -    SAVE_ALL(a) \
   7.172 -    "movl %esp,%eax\n\t" \
   7.173 -    "pushl %eax\n\t" \
   7.174 -    "call "SYMBOL_NAME_STR(smp_##x)"\n\t" \
   7.175 -    "addl $4,%esp\n\t" \
   7.176 +__asm__(                                        \
   7.177 +"\n"__ALIGN_STR"\n"                             \
   7.178 +STR(x) ":\n\t"                                  \
   7.179 +    "pushl $"#v"<<16\n\t"                       \
   7.180 +    SAVE_ALL(a)                                 \
   7.181 +    "movl %esp,%eax\n\t"                        \
   7.182 +    "pushl %eax\n\t"                            \
   7.183 +    "call "STR(smp_##x)"\n\t"                   \
   7.184 +    "addl $4,%esp\n\t"                          \
   7.185      "jmp ret_from_intr\n");
   7.186  
   7.187 -#define BUILD_COMMON_IRQ() \
   7.188 -__asm__( \
   7.189 -    "\n" __ALIGN_STR"\n" \
   7.190 -    "common_interrupt:\n\t" \
   7.191 -    SAVE_ALL(a) \
   7.192 -    "movl %esp,%eax\n\t" \
   7.193 -    "pushl %eax\n\t" \
   7.194 -    "call " SYMBOL_NAME_STR(do_IRQ) "\n\t" \
   7.195 -    "addl $4,%esp\n\t" \
   7.196 +#define BUILD_COMMON_IRQ()                      \
   7.197 +__asm__(                                        \
   7.198 +    "\n" __ALIGN_STR"\n"                        \
   7.199 +    "common_interrupt:\n\t"                     \
   7.200 +    SAVE_ALL(a)                                 \
   7.201 +    "movl %esp,%eax\n\t"                        \
   7.202 +    "pushl %eax\n\t"                            \
   7.203 +    "call " STR(do_IRQ) "\n\t"                  \
   7.204 +    "addl $4,%esp\n\t"                          \
   7.205      "jmp ret_from_intr\n");
   7.206  
   7.207  #define IRQ_NAME2(nr) nr##_interrupt(void)
   7.208  #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
   7.209  
   7.210 -#define BUILD_IRQ(nr) \
   7.211 -asmlinkage void IRQ_NAME(nr); \
   7.212 -__asm__( \
   7.213 -"\n"__ALIGN_STR"\n" \
   7.214 -SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
   7.215 -    "pushl $"#nr"<<16\n\t" \
   7.216 +#define BUILD_IRQ(nr)                           \
   7.217 +asmlinkage void IRQ_NAME(nr);                   \
   7.218 +__asm__(                                        \
   7.219 +"\n"__ALIGN_STR"\n"                             \
   7.220 +STR(IRQ) #nr "_interrupt:\n\t"                  \
   7.221 +    "pushl $"#nr"<<16\n\t"                      \
   7.222      "jmp common_interrupt");
   7.223  
   7.224  #endif /* __X86_32_ASM_DEFNS_H__ */
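
In the quoted (C inline-asm) builders above, SYMBOL_NAME_STR(x) is replaced by the tree's existing STR() stringification helper (assumed here to be the usual two-level stringizer, roughly #define __STR(x) #x / #define STR(x) __STR(x)). On an ELF target both spellings emit identical assembly text, e.g. for the do_IRQ call in BUILD_COMMON_IRQ():

    /* Old and new forms both expand to "call " "do_IRQ" "\n\t": */
    "call " SYMBOL_NAME_STR(do_IRQ) "\n\t"   /* before this changeset */
    "call " STR(do_IRQ) "\n\t"               /* after this changeset  */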
     8.1 --- a/xen/include/asm-x86/x86_64/asm_defns.h	Thu Apr 28 21:32:24 2005 +0000
     8.2 +++ b/xen/include/asm-x86/x86_64/asm_defns.h	Thu Apr 28 21:35:49 2005 +0000
     8.3 @@ -4,87 +4,87 @@
     8.4  /* Maybe auto-generate the following two cases (quoted vs. unquoted). */
     8.5  #ifndef __ASSEMBLY__
     8.6  
     8.7 -#define SAVE_ALL \
     8.8 -        "cld;" \
     8.9 -        "pushq %rdi;" \
    8.10 -        "pushq %rsi;" \
    8.11 -        "pushq %rdx;" \
    8.12 -        "pushq %rcx;" \
    8.13 -        "pushq %rax;" \
    8.14 -        "pushq %r8;" \
    8.15 -        "pushq %r9;" \
    8.16 -        "pushq %r10;" \
    8.17 -        "pushq %r11;" \
    8.18 -        "pushq %rbx;" \
    8.19 -        "pushq %rbp;" \
    8.20 -        "pushq %r12;" \
    8.21 -        "pushq %r13;" \
    8.22 -        "pushq %r14;" \
    8.23 +#define SAVE_ALL                                \
    8.24 +        "cld;"                                  \
    8.25 +        "pushq %rdi;"                           \
    8.26 +        "pushq %rsi;"                           \
    8.27 +        "pushq %rdx;"                           \
    8.28 +        "pushq %rcx;"                           \
    8.29 +        "pushq %rax;"                           \
    8.30 +        "pushq %r8;"                            \
    8.31 +        "pushq %r9;"                            \
    8.32 +        "pushq %r10;"                           \
    8.33 +        "pushq %r11;"                           \
    8.34 +        "pushq %rbx;"                           \
    8.35 +        "pushq %rbp;"                           \
    8.36 +        "pushq %r12;"                           \
    8.37 +        "pushq %r13;"                           \
    8.38 +        "pushq %r14;"                           \
    8.39          "pushq %r15;"
    8.40  
    8.41 -#define RESTORE_ALL \
    8.42 -        "popq  %r15;" \
    8.43 -        "popq  %r14;" \
    8.44 -        "popq  %r13;" \
    8.45 -        "popq  %r12;" \
    8.46 -        "popq  %rbp;" \
    8.47 -        "popq  %rbx;" \
    8.48 -        "popq  %r11;" \
    8.49 -        "popq  %r10;" \
    8.50 -        "popq  %r9;" \
    8.51 -        "popq  %r8;" \
    8.52 -        "popq  %rax;" \
    8.53 -        "popq  %rcx;" \
    8.54 -        "popq  %rdx;" \
    8.55 -        "popq  %rsi;" \
    8.56 +#define RESTORE_ALL                             \
    8.57 +        "popq  %r15;"                           \
    8.58 +        "popq  %r14;"                           \
    8.59 +        "popq  %r13;"                           \
    8.60 +        "popq  %r12;"                           \
    8.61 +        "popq  %rbp;"                           \
    8.62 +        "popq  %rbx;"                           \
    8.63 +        "popq  %r11;"                           \
    8.64 +        "popq  %r10;"                           \
    8.65 +        "popq  %r9;"                            \
    8.66 +        "popq  %r8;"                            \
    8.67 +        "popq  %rax;"                           \
    8.68 +        "popq  %rcx;"                           \
    8.69 +        "popq  %rdx;"                           \
    8.70 +        "popq  %rsi;"                           \
    8.71          "popq  %rdi;"
    8.72  
    8.73  /* Work around AMD erratum #88 */
    8.74 -#define safe_swapgs \
    8.75 +#define safe_swapgs                             \
    8.76          "mfence; swapgs;"
    8.77  
    8.78  #else
    8.79  
    8.80 -#define SAVE_ALL \
    8.81 -        cld; \
    8.82 -        pushq %rdi; \
    8.83 -        pushq %rsi; \
    8.84 -        pushq %rdx; \
    8.85 -        pushq %rcx; \
    8.86 -        pushq %rax; \
    8.87 -        pushq %r8; \
    8.88 -        pushq %r9; \
    8.89 -        pushq %r10; \
    8.90 -        pushq %r11; \
    8.91 -        pushq %rbx; \
    8.92 -        pushq %rbp; \
    8.93 -        pushq %r12; \
    8.94 -        pushq %r13; \
    8.95 -        pushq %r14; \
    8.96 +#define SAVE_ALL                                \
    8.97 +        cld;                                    \
    8.98 +        pushq %rdi;                             \
    8.99 +        pushq %rsi;                             \
   8.100 +        pushq %rdx;                             \
   8.101 +        pushq %rcx;                             \
   8.102 +        pushq %rax;                             \
   8.103 +        pushq %r8;                              \
   8.104 +        pushq %r9;                              \
   8.105 +        pushq %r10;                             \
   8.106 +        pushq %r11;                             \
   8.107 +        pushq %rbx;                             \
   8.108 +        pushq %rbp;                             \
   8.109 +        pushq %r12;                             \
   8.110 +        pushq %r13;                             \
   8.111 +        pushq %r14;                             \
   8.112          pushq %r15;
   8.113  
   8.114 -#define RESTORE_ALL \
   8.115 -        popq  %r15; \
   8.116 -        popq  %r14; \
   8.117 -        popq  %r13; \
   8.118 -        popq  %r12; \
   8.119 -        popq  %rbp; \
   8.120 -        popq  %rbx; \
   8.121 -        popq  %r11; \
   8.122 -        popq  %r10; \
   8.123 -        popq  %r9; \
   8.124 -        popq  %r8; \
   8.125 -        popq  %rax; \
   8.126 -        popq  %rcx; \
   8.127 -        popq  %rdx; \
   8.128 -        popq  %rsi; \
   8.129 +#define RESTORE_ALL                             \
   8.130 +        popq  %r15;                             \
   8.131 +        popq  %r14;                             \
   8.132 +        popq  %r13;                             \
   8.133 +        popq  %r12;                             \
   8.134 +        popq  %rbp;                             \
   8.135 +        popq  %rbx;                             \
   8.136 +        popq  %r11;                             \
   8.137 +        popq  %r10;                             \
   8.138 +        popq  %r9;                              \
   8.139 +        popq  %r8;                              \
   8.140 +        popq  %rax;                             \
   8.141 +        popq  %rcx;                             \
   8.142 +        popq  %rdx;                             \
   8.143 +        popq  %rsi;                             \
   8.144          popq  %rdi;
   8.145  
   8.146  #ifdef PERF_COUNTERS
   8.147 -#define PERFC_INCR(_name,_idx) \
   8.148 -    pushq %rdx; \
   8.149 -    leaq SYMBOL_NAME(perfcounters)+_name(%rip),%rdx; \
   8.150 -    lock incl (%rdx,_idx,4); \
   8.151 +#define PERFC_INCR(_name,_idx)                  \
   8.152 +    pushq %rdx;                                 \
   8.153 +    leaq perfcounters+_name(%rip),%rdx;         \
   8.154 +    lock incl (%rdx,_idx,4);                    \
   8.155      popq %rdx;
   8.156  #else
   8.157  #define PERFC_INCR(_name,_idx)
   8.158 @@ -93,49 +93,49 @@
   8.159  #endif
   8.160  
   8.161  #define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
   8.162 -#define XBUILD_SMP_INTERRUPT(x,v)\
   8.163 -asmlinkage void x(void); \
   8.164 -__asm__( \
   8.165 -    "\n"__ALIGN_STR"\n" \
   8.166 -    SYMBOL_NAME_STR(x) ":\n\t" \
   8.167 -    "pushq $0\n\t" \
   8.168 -    "movl $"#v",4(%rsp)\n\t" \
   8.169 -    SAVE_ALL \
   8.170 -    "callq "SYMBOL_NAME_STR(smp_##x)"\n\t" \
   8.171 +#define XBUILD_SMP_INTERRUPT(x,v)               \
   8.172 +asmlinkage void x(void);                        \
   8.173 +__asm__(                                        \
   8.174 +    "\n"__ALIGN_STR"\n"                         \
   8.175 +    STR(x) ":\n\t"                              \
   8.176 +    "pushq $0\n\t"                              \
   8.177 +    "movl $"#v",4(%rsp)\n\t"                    \
   8.178 +    SAVE_ALL                                    \
   8.179 +    "callq "STR(smp_##x)"\n\t"                  \
   8.180      "jmp ret_from_intr\n");
   8.181  
   8.182  #define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v)
   8.183 -#define XBUILD_SMP_TIMER_INTERRUPT(x,v) \
   8.184 +#define XBUILD_SMP_TIMER_INTERRUPT(x,v)         \
   8.185  asmlinkage void x(struct cpu_user_regs * regs); \
   8.186 -__asm__( \
   8.187 -"\n"__ALIGN_STR"\n" \
   8.188 -SYMBOL_NAME_STR(x) ":\n\t" \
   8.189 -    "pushq $0\n\t" \
   8.190 -    "movl $"#v",4(%rsp)\n\t" \
   8.191 -    SAVE_ALL \
   8.192 -    "movq %rsp,%rdi\n\t" \
   8.193 -    "callq "SYMBOL_NAME_STR(smp_##x)"\n\t" \
   8.194 +__asm__(                                        \
   8.195 +"\n"__ALIGN_STR"\n"                             \
   8.196 +STR(x) ":\n\t"                                  \
   8.197 +    "pushq $0\n\t"                              \
   8.198 +    "movl $"#v",4(%rsp)\n\t"                    \
   8.199 +    SAVE_ALL                                    \
   8.200 +    "movq %rsp,%rdi\n\t"                        \
   8.201 +    "callq "STR(smp_##x)"\n\t"                  \
   8.202      "jmp ret_from_intr\n");
   8.203  
   8.204 -#define BUILD_COMMON_IRQ() \
   8.205 -__asm__( \
   8.206 -    "\n" __ALIGN_STR"\n" \
   8.207 -    "common_interrupt:\n\t" \
   8.208 -    SAVE_ALL \
   8.209 -    "movq %rsp,%rdi\n\t" \
   8.210 -    "callq " SYMBOL_NAME_STR(do_IRQ) "\n\t" \
   8.211 +#define BUILD_COMMON_IRQ()                      \
   8.212 +__asm__(                                        \
   8.213 +    "\n" __ALIGN_STR"\n"                        \
   8.214 +    "common_interrupt:\n\t"                     \
   8.215 +    SAVE_ALL                                    \
   8.216 +    "movq %rsp,%rdi\n\t"                        \
   8.217 +    "callq " STR(do_IRQ) "\n\t"                 \
   8.218      "jmp ret_from_intr\n");
   8.219  
   8.220  #define IRQ_NAME2(nr) nr##_interrupt(void)
   8.221  #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
   8.222  
   8.223 -#define BUILD_IRQ(nr) \
   8.224 -asmlinkage void IRQ_NAME(nr); \
   8.225 -__asm__( \
   8.226 -"\n"__ALIGN_STR"\n" \
   8.227 -SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
   8.228 -    "pushq $0\n\t" \
   8.229 -    "movl $"#nr",4(%rsp)\n\t" \
   8.230 +#define BUILD_IRQ(nr)                           \
   8.231 +asmlinkage void IRQ_NAME(nr);                   \
   8.232 +__asm__(                                        \
   8.233 +"\n"__ALIGN_STR"\n"                             \
   8.234 +STR(IRQ) #nr "_interrupt:\n\t"                  \
   8.235 +    "pushq $0\n\t"                              \
   8.236 +    "movl $"#nr",4(%rsp)\n\t"                   \
   8.237      "jmp common_interrupt");
   8.238  
   8.239  #endif /* __X86_64_ASM_DEFNS_H__ */
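
With the simplified ENTRY() definition in xen/include/asm-x86/config.h, a declaration such as the new ENTRY(trampoline_end) in trampoline.S expands to a global, aligned label with no SYMBOL_NAME indirection (sketch of the preprocessed output, using the __ALIGN value shown in the config.h hunk):

    .globl trampoline_end
    .align 16,0x90
    trampoline_end: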