ia64/xen-unstable

changeset 12681:b19922790440

[XEN] Fixup whitespace and tweak for Xen coding style.

Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
author Ian Campbell <ian.campbell@xensource.com>
date Thu Nov 30 12:38:51 2006 +0000 (2006-11-30)
parents aa08fbbf1243
children 2a17ff9b8ffc
files xen/arch/x86/crash.c xen/arch/x86/machine_kexec.c xen/arch/x86/setup.c xen/common/kexec.c
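
For readers unfamiliar with the Xen hypervisor coding style that this changeset converts the kexec/crash files to, here is a minimal illustrative sketch (not part of the changeset; the function and values are hypothetical) of the conventions visible in the hunks below: four-space indentation with no hard tabs, a space inside the parentheses of control statements such as if/while/switch, and opening braces on their own line.

/* Illustrative sketch only -- hypothetical code, not from the changeset. */
#include <stdio.h>

static int xen_style_example(int cpu, int crashing_cpu)
{
    int msecs = 1000;

    /* Space inside the condition parentheses, as in the hunks below. */
    if ( cpu == crashing_cpu )
        return 1;

    /* Multi-statement bodies take the opening brace on its own line. */
    while ( msecs > 0 )
    {
        msecs--;
    }

    return 0;
}

int main(void)
{
    printf("%d\n", xen_style_example(0, 1));
    return 0;
}

The diff itself is purely mechanical: no functional change, only reindentation and brace/spacing tweaks to match the style above.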
line diff
     1.1 --- a/xen/arch/x86/crash.c	Thu Nov 30 12:38:51 2006 +0000
     1.2 +++ b/xen/arch/x86/crash.c	Thu Nov 30 12:38:51 2006 +0000
     1.3 @@ -1,6 +1,6 @@
     1.4  /******************************************************************************
     1.5   * crash.c
     1.6 - * 
     1.7 + *
     1.8   * Based heavily on arch/i386/kernel/crash.c from Linux 2.6.16
     1.9   *
    1.10   * Xen port written by:
    1.11 @@ -32,23 +32,23 @@ static atomic_t waiting_for_crash_ipi;
    1.12  
    1.13  static int crash_nmi_callback(struct cpu_user_regs *regs, int cpu)
    1.14  {
    1.15 -	/* Don't do anything if this handler is invoked on crashing cpu.
    1.16 -	 * Otherwise, system will completely hang. Crashing cpu can get
    1.17 -	 * an NMI if system was initially booted with nmi_watchdog parameter.
    1.18 -	 */
    1.19 -	if (cpu == crashing_cpu)
    1.20 -		return 1;
    1.21 -	local_irq_disable();
    1.22 +    /* Don't do anything if this handler is invoked on crashing cpu.
    1.23 +     * Otherwise, system will completely hang. Crashing cpu can get
    1.24 +     * an NMI if system was initially booted with nmi_watchdog parameter.
    1.25 +     */
    1.26 +    if ( cpu == crashing_cpu )
    1.27 +        return 1;
    1.28 +    local_irq_disable();
    1.29  
    1.30      machine_crash_save_cpu();
    1.31 -	disable_local_APIC();
    1.32 -	atomic_dec(&waiting_for_crash_ipi);
    1.33 -	hvm_disable();
    1.34 +    disable_local_APIC();
    1.35 +    atomic_dec(&waiting_for_crash_ipi);
    1.36 +    hvm_disable();
    1.37  
    1.38      for ( ; ; )
    1.39          __asm__ __volatile__ ( "hlt" );
    1.40  
    1.41 -	return 1;
    1.42 +    return 1;
    1.43  }
    1.44  
    1.45  /*
    1.46 @@ -60,31 +60,32 @@ static void smp_send_nmi_allbutself(void
    1.47  {
    1.48      cpumask_t allbutself = cpu_online_map;
    1.49  
    1.50 -   	cpu_clear(smp_processor_id(), allbutself);
    1.51 +    cpu_clear(smp_processor_id(), allbutself);
    1.52      send_IPI_mask(allbutself, APIC_DM_NMI);
    1.53  }
    1.54  
    1.55  static void nmi_shootdown_cpus(void)
    1.56  {
    1.57 -	unsigned long msecs;
    1.58 +    unsigned long msecs;
    1.59  
    1.60 -	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
    1.61 -	/* Would it be better to replace the trap vector here? */
    1.62 -	set_nmi_callback(crash_nmi_callback);
    1.63 -	/* Ensure the new callback function is set before sending
    1.64 -	 * out the NMI
    1.65 -	 */
    1.66 -	wmb();
    1.67 +    atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
    1.68 +    /* Would it be better to replace the trap vector here? */
    1.69 +    set_nmi_callback(crash_nmi_callback);
    1.70 +    /* Ensure the new callback function is set before sending
    1.71 +     * out the NMI
    1.72 +     */
    1.73 +    wmb();
    1.74  
    1.75 -	smp_send_nmi_allbutself();
    1.76 +    smp_send_nmi_allbutself();
    1.77  
    1.78 -	msecs = 1000; /* Wait at most a second for the other cpus to stop */
    1.79 -	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
    1.80 -		mdelay(1);
    1.81 -		msecs--;
    1.82 -	}
    1.83 +    msecs = 1000; /* Wait at most a second for the other cpus to stop */
    1.84 +    while ( (atomic_read(&waiting_for_crash_ipi) > 0) && msecs )
    1.85 +    {
    1.86 +        mdelay(1);
    1.87 +        msecs--;
    1.88 +    }
    1.89  
    1.90 -	/* Leave the nmi callback set */
    1.91 +    /* Leave the nmi callback set */
    1.92      disable_local_APIC();
    1.93  }
    1.94  #endif
    1.95 @@ -101,11 +102,11 @@ static void crash_save_xen_notes(void)
    1.96  
    1.97  void machine_crash_shutdown(void)
    1.98  {
    1.99 -	printk("machine_crash_shutdown: %d\n", smp_processor_id());
   1.100 -	local_irq_disable();
   1.101 +    printk("machine_crash_shutdown: %d\n", smp_processor_id());
   1.102 +    local_irq_disable();
   1.103  
   1.104  #ifdef CONFIG_SMP
   1.105 -	nmi_shootdown_cpus();
   1.106 +    nmi_shootdown_cpus();
   1.107  #endif
   1.108  
   1.109  #ifdef CONFIG_X86_IO_APIC
   1.110 @@ -125,4 +126,3 @@ void machine_crash_shutdown(void)
   1.111   * indent-tabs-mode: nil
   1.112   * End:
   1.113   */
   1.114 -
     2.1 --- a/xen/arch/x86/machine_kexec.c	Thu Nov 30 12:38:51 2006 +0000
     2.2 +++ b/xen/arch/x86/machine_kexec.c	Thu Nov 30 12:38:51 2006 +0000
     2.3 @@ -1,6 +1,6 @@
     2.4  /******************************************************************************
     2.5   * machine_kexec.c
     2.6 - * 
     2.7 + *
     2.8   * Xen port written by:
     2.9   * - Simon 'Horms' Horman <horms@verge.net.au>
    2.10   * - Magnus Damm <magnus@valinux.co.jp>
    2.11 @@ -30,23 +30,28 @@ int machine_kexec_load(int type, int slo
    2.12       * in every odd index in page_list[].
    2.13       */
    2.14  
    2.15 -    for (k = 0; k < KEXEC_XEN_NO_PAGES; k++) {
    2.16 -        if ((k & 1) == 0) {               /* even pages: machine address */
    2.17 +    for ( k = 0; k < KEXEC_XEN_NO_PAGES; k++ )
    2.18 +    {
    2.19 +        if ( (k & 1) == 0 )
    2.20 +        {
    2.21 +            /* Even pages: machine address. */
    2.22              prev_ma = image->page_list[k];
    2.23          }
    2.24 -        else {                            /* odd pages: va for previous ma */
    2.25 +        else
    2.26 +        {
    2.27 +            /* Odd pages: va for previous ma. */
    2.28              set_fixmap(fix_base + (k >> 1), prev_ma);
    2.29              image->page_list[k] = fix_to_virt(fix_base + (k >> 1));
    2.30          }
    2.31      }
    2.32  
    2.33 -  return 0;
    2.34 +    return 0;
    2.35  }
    2.36  
    2.37  void machine_kexec_unload(int type, int slot, xen_kexec_image_t *image)
    2.38  {
    2.39  }
    2.40 -  
    2.41 +
    2.42  static void __machine_shutdown(void *data)
    2.43  {
    2.44      xen_kexec_image_t *image = (xen_kexec_image_t *)data;
    2.45 @@ -63,7 +68,7 @@ static void __machine_shutdown(void *dat
    2.46  
    2.47      machine_kexec(image);
    2.48  }
    2.49 -  
    2.50 +
    2.51  void machine_shutdown(xen_kexec_image_t *image)
    2.52  {
    2.53      int reboot_cpu_id;
    2.54 @@ -71,10 +76,11 @@ void machine_shutdown(xen_kexec_image_t 
    2.55  
    2.56      reboot_cpu_id = 0;
    2.57  
    2.58 -    if (!cpu_isset(reboot_cpu_id, cpu_online_map))
    2.59 +    if ( !cpu_isset(reboot_cpu_id, cpu_online_map) )
    2.60          reboot_cpu_id = smp_processor_id();
    2.61 -    
    2.62 -    if (reboot_cpu_id != smp_processor_id()) {
    2.63 +
    2.64 +    if ( reboot_cpu_id != smp_processor_id() )
    2.65 +    {
    2.66          cpus_clear(reboot_cpu);
    2.67          cpu_set(reboot_cpu_id, reboot_cpu);
    2.68          on_selected_cpus(reboot_cpu, __machine_shutdown, image, 1, 0);
    2.69 @@ -82,7 +88,9 @@ void machine_shutdown(xen_kexec_image_t 
    2.70                  ; /* nothing */
    2.71      }
    2.72      else
    2.73 +    {
    2.74          __machine_shutdown(image);
    2.75 +    }
    2.76      BUG();
    2.77  }
    2.78  
     3.1 --- a/xen/arch/x86/setup.c	Thu Nov 30 12:38:51 2006 +0000
     3.2 +++ b/xen/arch/x86/setup.c	Thu Nov 30 12:38:51 2006 +0000
     3.3 @@ -274,11 +274,11 @@ static void srat_detect_node(int cpu)
     3.4          printk(KERN_INFO "CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
     3.5  }
     3.6  
     3.7 -void __init move_memory(unsigned long dst, 
     3.8 +void __init move_memory(unsigned long dst,
     3.9                            unsigned long src_start, unsigned long src_end)
    3.10  {
    3.11  #if defined(CONFIG_X86_32)
    3.12 -    memmove((void *)dst,  /* use low mapping */
    3.13 +    memmove((void *)dst,            /* use low mapping */
    3.14              (void *)src_start,      /* use low mapping */
    3.15              src_end - src_start);
    3.16  #elif defined(CONFIG_X86_64)
    3.17 @@ -481,22 +481,23 @@ void __init __start_xen(multiboot_info_t
    3.18      }
    3.19  
    3.20      machine_kexec_reserved(&crash_area);
    3.21 -    if (crash_area.size > 0) {
    3.22 +    if ( crash_area.size > 0 )
    3.23 +    {
    3.24          unsigned long kdump_start, kdump_size, k;
    3.25  
    3.26 -        /* mark images pages as free for now */
    3.27 +        /* Mark images pages as free for now. */
    3.28  
    3.29          init_boot_pages(initial_images_start, initial_images_end);
    3.30  
    3.31          kdump_start = crash_area.start;
    3.32          kdump_size = crash_area.size;
    3.33  
    3.34 -        printk("Kdump: %luMB (%lukB) at 0x%lx\n", 
    3.35 +        printk("Kdump: %luMB (%lukB) at 0x%lx\n",
    3.36                 kdump_size >> 20,
    3.37                 kdump_size >> 10,
    3.38                 kdump_start);
    3.39  
    3.40 -        if ((kdump_start & ~PAGE_MASK) || (kdump_size & ~PAGE_MASK))
    3.41 +        if ( (kdump_start & ~PAGE_MASK) || (kdump_size & ~PAGE_MASK) )
    3.42              panic("Kdump parameters not page aligned\n");
    3.43  
    3.44          kdump_start >>= PAGE_SHIFT;
    3.45 @@ -506,7 +507,7 @@ void __init __start_xen(multiboot_info_t
    3.46  
    3.47          k = alloc_boot_pages_at(kdump_size, kdump_start);
    3.48  
    3.49 -        if (k != kdump_start)
    3.50 +        if ( k != kdump_start )
    3.51              panic("Unable to reserve Kdump memory\n");
    3.52  
    3.53          /* allocate pages for relocated initial images */
    3.54 @@ -516,7 +517,7 @@ void __init __start_xen(multiboot_info_t
    3.55  
    3.56          k = alloc_boot_pages(k, 1);
    3.57  
    3.58 -        if (!k)
    3.59 +        if ( !k )
    3.60              panic("Unable to allocate initial images memory\n");
    3.61  
    3.62          move_memory(k << PAGE_SHIFT, initial_images_start, initial_images_end);
    3.63 @@ -524,12 +525,12 @@ void __init __start_xen(multiboot_info_t
    3.64          initial_images_end -= initial_images_start;
    3.65          initial_images_start = k << PAGE_SHIFT;
    3.66          initial_images_end += initial_images_start;
    3.67 -    }        
    3.68 +    }
    3.69  
    3.70      memguard_init();
    3.71      percpu_guard_areas();
    3.72  
    3.73 -    printk("System RAM: %luMB (%lukB)\n", 
    3.74 +    printk("System RAM: %luMB (%lukB)\n",
    3.75             nr_pages >> (20 - PAGE_SHIFT),
    3.76             nr_pages << (PAGE_SHIFT - 10));
    3.77      total_pages = nr_pages;
     4.1 --- a/xen/common/kexec.c	Thu Nov 30 12:38:51 2006 +0000
     4.2 +++ b/xen/common/kexec.c	Thu Nov 30 12:38:51 2006 +0000
     4.3 @@ -1,6 +1,6 @@
     4.4  /******************************************************************************
     4.5   * kexec.c - Achitecture independent kexec code for Xen
     4.6 - * 
     4.7 + *
     4.8   * Xen port written by:
     4.9   * - Simon 'Horms' Horman <horms@verge.net.au>
    4.10   * - Magnus Damm <magnus@valinux.co.jp>
    4.11 @@ -42,7 +42,7 @@ spinlock_t kexec_lock = SPIN_LOCK_UNLOCK
    4.12  static void one_cpu_only(void)
    4.13  {
    4.14     /* Only allow the first cpu to continue - force other cpus to spin */
    4.15 -    if (test_and_set_bit(KEXEC_FLAG_IN_PROGRESS, &kexec_flags))
    4.16 +    if ( test_and_set_bit(KEXEC_FLAG_IN_PROGRESS, &kexec_flags) )
    4.17      {
    4.18          while (1);
    4.19      }
    4.20 @@ -55,7 +55,7 @@ void machine_crash_save_cpu(void)
    4.21      int cpu = smp_processor_id();
    4.22      crash_note_t *cntp;
    4.23  
    4.24 -    if (!cpu_test_and_set(cpu, crash_saved_cpus))
    4.25 +    if ( !cpu_test_and_set(cpu, crash_saved_cpus) )
    4.26      {
    4.27          cntp = &per_cpu(crash_notes, cpu);
    4.28          elf_core_save_regs(&cntp->core.desc.desc.pr_reg,
    4.29 @@ -65,7 +65,7 @@ void machine_crash_save_cpu(void)
    4.30          setup_crash_note(cntp, core, CORE_STR, CORE_STR_LEN, NT_PRSTATUS);
    4.31  
    4.32          /* setup crash note "Xen", XEN_ELFNOTE_CRASH_REGS */
    4.33 -        setup_crash_note(cntp, xen_regs, XEN_STR, XEN_STR_LEN, 
    4.34 +        setup_crash_note(cntp, xen_regs, XEN_STR, XEN_STR_LEN,
    4.35                           XEN_ELFNOTE_CRASH_REGS);
    4.36      }
    4.37  }
    4.38 @@ -83,7 +83,7 @@ crash_xen_info_t *machine_crash_save_inf
    4.39      cntp = &per_cpu(crash_notes, cpu);
    4.40  
    4.41      /* setup crash note "Xen", XEN_ELFNOTE_CRASH_INFO */
    4.42 -    setup_crash_note(cntp, xen_info, XEN_STR, XEN_STR_LEN, 
    4.43 +    setup_crash_note(cntp, xen_info, XEN_STR, XEN_STR_LEN,
    4.44                       XEN_ELFNOTE_CRASH_INFO);
    4.45  
    4.46      info = &cntp->xen_info.desc.desc;
    4.47 @@ -106,7 +106,7 @@ void machine_crash_kexec(void)
    4.48      xen_kexec_image_t *image;
    4.49  
    4.50      one_cpu_only();
    4.51 - 
    4.52 +
    4.53      machine_crash_save_cpu();
    4.54      crashing_cpu = smp_processor_id();
    4.55  
    4.56 @@ -114,7 +114,7 @@ void machine_crash_kexec(void)
    4.57  
    4.58      pos = (test_bit(KEXEC_FLAG_CRASH_POS, &kexec_flags) != 0);
    4.59  
    4.60 -    if (test_bit(KEXEC_IMAGE_CRASH_BASE + pos, &kexec_flags))
    4.61 +    if ( test_bit(KEXEC_IMAGE_CRASH_BASE + pos, &kexec_flags) )
    4.62      {
    4.63          image = &kexec_image[KEXEC_IMAGE_CRASH_BASE + pos];
    4.64          machine_kexec(image); /* Does not return */
    4.65 @@ -140,7 +140,7 @@ void machine_kexec_reserved(xen_kexec_re
    4.66  {
    4.67      unsigned long val[2];
    4.68      char *str = opt_crashkernel;
    4.69 -    int k = 0; 
    4.70 +    int k = 0;
    4.71  
    4.72      memset(reservation, 0, sizeof(*reservation));
    4.73  
    4.74 @@ -170,7 +170,7 @@ void machine_kexec_reserved(xen_kexec_re
    4.75  static int kexec_get_reserve(xen_kexec_range_t *range)
    4.76  {
    4.77      xen_kexec_reserve_t reservation;
    4.78 -    
    4.79 +
    4.80      machine_kexec_reserved(&reservation);
    4.81  
    4.82      range->start = reservation.start;
    4.83 @@ -182,7 +182,7 @@ extern unsigned long _text, _end;
    4.84  
    4.85  static int kexec_get_xen(xen_kexec_range_t *range, int get_ma)
    4.86  {
    4.87 -    if (get_ma)
    4.88 +    if ( get_ma )
    4.89          range->start = virt_to_maddr(&_text);
    4.90      else
    4.91          range->start = (unsigned long) &_text;
    4.92 @@ -193,7 +193,7 @@ static int kexec_get_xen(xen_kexec_range
    4.93  
    4.94  static int kexec_get_cpu(xen_kexec_range_t *range)
    4.95  {
    4.96 -    if (range->nr < 0 || range->nr >= num_present_cpus())
    4.97 +    if ( range->nr < 0 || range->nr >= num_present_cpus() )
    4.98          return -EINVAL;
    4.99  
   4.100      range->start = __pa((unsigned long)&per_cpu(crash_notes, range->nr));
   4.101 @@ -205,11 +205,11 @@ static int kexec_get_range(XEN_GUEST_HAN
   4.102  {
   4.103      xen_kexec_range_t range;
   4.104      int ret = -EINVAL;
   4.105 -    
   4.106 -    if (unlikely(copy_from_guest(&range, uarg, 1)))
   4.107 +
   4.108 +    if ( unlikely(copy_from_guest(&range, uarg, 1)) )
   4.109          return -EFAULT;
   4.110  
   4.111 -    switch (range.range)
   4.112 +    switch ( range.range )
   4.113      {
   4.114      case KEXEC_RANGE_MA_CRASH:
   4.115          ret = kexec_get_reserve(&range);
   4.116 @@ -225,15 +225,15 @@ static int kexec_get_range(XEN_GUEST_HAN
   4.117          break;
   4.118      }
   4.119  
   4.120 -    if (ret == 0 && unlikely(copy_to_guest(uarg, &range, 1)))
   4.121 +    if ( ret == 0 && unlikely(copy_to_guest(uarg, &range, 1)) )
   4.122          return -EFAULT;
   4.123 -    
   4.124 +
   4.125      return ret;
   4.126  }
   4.127  
   4.128  static int kexec_load_get_bits(int type, int *base, int *bit)
   4.129  {
   4.130 -    switch (type)
   4.131 +    switch ( type )
   4.132      {
   4.133      case KEXEC_TYPE_DEFAULT:
   4.134          *base = KEXEC_IMAGE_DEFAULT_BASE;
   4.135 @@ -256,24 +256,24 @@ static int kexec_load_unload(unsigned lo
   4.136      int base, bit, pos;
   4.137      int ret = 0;
   4.138  
   4.139 -    if (unlikely(copy_from_guest(&load, uarg, 1)))
   4.140 +    if ( unlikely(copy_from_guest(&load, uarg, 1)) )
   4.141          return -EFAULT;
   4.142  
   4.143 -    if (kexec_load_get_bits(load.type, &base, &bit))
   4.144 +    if ( kexec_load_get_bits(load.type, &base, &bit) )
   4.145          return -EINVAL;
   4.146  
   4.147      pos = (test_bit(bit, &kexec_flags) != 0);
   4.148  
   4.149      /* Load the user data into an unused image */
   4.150 -    if (op == KEXEC_CMD_kexec_load)
   4.151 +    if ( op == KEXEC_CMD_kexec_load )
   4.152      {
   4.153          image = &kexec_image[base + !pos];
   4.154  
   4.155          BUG_ON(test_bit((base + !pos), &kexec_flags)); /* must be free */
   4.156  
   4.157          memcpy(image, &load.image, sizeof(*image));
   4.158 -            
   4.159 -        if (!(ret = machine_kexec_load(load.type, base + !pos, image)))
   4.160 +
   4.161 +        if ( !(ret = machine_kexec_load(load.type, base + !pos, image)) )
   4.162          {
   4.163              /* Set image present bit */
   4.164              set_bit((base + !pos), &kexec_flags);
   4.165 @@ -284,9 +284,9 @@ static int kexec_load_unload(unsigned lo
   4.166      }
   4.167  
   4.168      /* Unload the old image if present and load successful */
   4.169 -    if (ret == 0 && !test_bit(KEXEC_FLAG_IN_PROGRESS, &kexec_flags))
   4.170 +    if ( ret == 0 && !test_bit(KEXEC_FLAG_IN_PROGRESS, &kexec_flags) )
   4.171      {
   4.172 -        if (test_and_clear_bit((base + pos), &kexec_flags))
   4.173 +        if ( test_and_clear_bit((base + pos), &kexec_flags) )
   4.174          {
   4.175              image = &kexec_image[base + pos];
   4.176              machine_kexec_unload(load.type, base + pos, image);
   4.177 @@ -302,16 +302,16 @@ static int kexec_exec(XEN_GUEST_HANDLE(v
   4.178      xen_kexec_image_t *image;
   4.179      int base, bit, pos;
   4.180  
   4.181 -    if (unlikely(copy_from_guest(&exec, uarg, 1)))
   4.182 +    if ( unlikely(copy_from_guest(&exec, uarg, 1)) )
   4.183          return -EFAULT;
   4.184  
   4.185 -    if (kexec_load_get_bits(exec.type, &base, &bit))
   4.186 +    if ( kexec_load_get_bits(exec.type, &base, &bit) )
   4.187          return -EINVAL;
   4.188  
   4.189      pos = (test_bit(bit, &kexec_flags) != 0);
   4.190  
   4.191      /* Only allow kexec/kdump into loaded images */
   4.192 -    if (!test_bit(base + pos, &kexec_flags))
   4.193 +    if ( !test_bit(base + pos, &kexec_flags) )
   4.194          return -ENOENT;
   4.195  
   4.196      switch (exec.type)
   4.197 @@ -334,10 +334,10 @@ long do_kexec_op(unsigned long op, XEN_G
   4.198      unsigned long flags;
   4.199      int ret = -EINVAL;
   4.200  
   4.201 -    if ( !IS_PRIV(current->domain) )  
   4.202 +    if ( !IS_PRIV(current->domain) )
   4.203          return -EPERM;
   4.204  
   4.205 -    switch (op)
   4.206 +    switch ( op )
   4.207      {
   4.208      case KEXEC_CMD_kexec_get_range:
   4.209          ret = kexec_get_range(uarg);