ia64/linux-2.6.18-xen.hg

changeset 392:71a415f9179b

[IA64] Coding style fix

Mainly white spaces, // comments and * ops.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Fri Jan 18 14:20:59 2008 -0700 (2008-01-18)
parents 77f831cbb91d
children c779f9e01072
files arch/ia64/oprofile/xenoprof.c arch/ia64/xen/hypercall.S arch/ia64/xen/hypervisor.c arch/ia64/xen/util.c arch/ia64/xen/xcom_privcmd.c include/asm-ia64/hypervisor.h include/asm-ia64/maddr.h include/asm-ia64/xenoprof.h
line diff
     1.1 --- a/arch/ia64/oprofile/xenoprof.c	Fri Jan 18 16:52:25 2008 +0000
     1.2 +++ b/arch/ia64/oprofile/xenoprof.c	Fri Jan 18 14:20:59 2008 -0700
     1.3 @@ -51,9 +51,9 @@ void xenoprof_arch_stop(void)
     1.4  }
     1.5  
     1.6  /* XXX move them to an appropriate header file. */
     1.7 -struct resource* xen_ia64_allocate_resource(unsigned long size); 
     1.8 -void xen_ia64_release_resource(struct resource* res); 
     1.9 -void xen_ia64_unmap_resource(struct resource* res); 
    1.10 +struct resource* xen_ia64_allocate_resource(unsigned long size);
    1.11 +void xen_ia64_release_resource(struct resource *res);
    1.12 +void xen_ia64_unmap_resource(struct resource *res);
    1.13  
    1.14  struct resource*
    1.15  xenoprof_ia64_allocate_resource(int32_t max_samples)
    1.16 @@ -73,7 +73,7 @@ xenoprof_ia64_allocate_resource(int32_t 
    1.17  	return xen_ia64_allocate_resource(bufsize);
    1.18  }
    1.19  
    1.20 -void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf)
    1.21 +void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer *sbuf)
    1.22  {
    1.23  	if (sbuf->buffer) {
    1.24  		xen_ia64_unmap_resource(sbuf->arch.res);
    1.25 @@ -82,11 +82,11 @@ void xenoprof_arch_unmap_shared_buffer(s
    1.26  	}
    1.27  }
    1.28  
    1.29 -int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer,
    1.30 -                                    struct xenoprof_shared_buffer* sbuf)
    1.31 +int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer *get_buffer,
    1.32 +                                    struct xenoprof_shared_buffer *sbuf)
    1.33  {
    1.34  	int ret;
    1.35 -	struct resource* res;
    1.36 +	struct resource *res;
    1.37  
    1.38  	sbuf->buffer = NULL;
    1.39  	sbuf->arch.res = NULL;
    1.40 @@ -112,11 +112,11 @@ int xenoprof_arch_map_shared_buffer(stru
    1.41  	return ret;
    1.42  }
    1.43  
    1.44 -int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain,
    1.45 -                              struct xenoprof_shared_buffer* sbuf)
    1.46 +int xenoprof_arch_set_passive(struct xenoprof_passive *pdomain,
    1.47 +                              struct xenoprof_shared_buffer *sbuf)
    1.48  {
    1.49  	int ret;
    1.50 -	struct resource* res;
    1.51 +	struct resource *res;
    1.52  
    1.53  	sbuf->buffer = NULL;
    1.54  	sbuf->arch.res = NULL;
     2.1 --- a/arch/ia64/xen/hypercall.S	Fri Jan 18 16:52:25 2008 +0000
     2.2 +++ b/arch/ia64/xen/hypercall.S	Fri Jan 18 14:20:59 2008 -0700
     2.3 @@ -17,7 +17,7 @@
     2.4  GLOBAL_ENTRY(xen_get_psr)
     2.5  	XEN_HYPER_GET_PSR
     2.6  	br.ret.sptk.many rp
     2.7 -    ;;
     2.8 +	;;
     2.9  END(xen_get_psr)
    2.10  
    2.11  GLOBAL_ENTRY(xen_get_ivr)
    2.12 @@ -124,13 +124,13 @@ END(xen_set_eflag)
    2.13  #endif /* ASM_SUPPORTED */
    2.14  
    2.15  GLOBAL_ENTRY(xen_send_ipi)
    2.16 -        mov r14=r32
    2.17 -        mov r15=r33
    2.18 -        mov r2=0x400
    2.19 -        break 0x1000
    2.20 -        ;;
    2.21 -        br.ret.sptk.many rp
    2.22 -        ;;
    2.23 +	mov r14=r32
    2.24 +	mov r15=r33
    2.25 +	mov r2=0x400
    2.26 +	break 0x1000
    2.27 +	;;
    2.28 +	br.ret.sptk.many rp
    2.29 +	;;
    2.30  END(xen_send_ipi)
    2.31  
    2.32  GLOBAL_ENTRY(__hypercall)
     3.1 --- a/arch/ia64/xen/hypervisor.c	Fri Jan 18 16:52:25 2008 +0000
     3.2 +++ b/arch/ia64/xen/hypervisor.c	Fri Jan 18 14:20:59 2008 -0700
     3.3 @@ -20,7 +20,6 @@
     3.4   *
     3.5   */
     3.6  
     3.7 -//#include <linux/kernel.h>
     3.8  #include <linux/spinlock.h>
     3.9  #include <linux/bootmem.h>
    3.10  #include <linux/module.h>
    3.11 @@ -35,7 +34,8 @@
    3.12  #include <xen/xencons.h>
    3.13  #include <xen/balloon.h>
    3.14  
    3.15 -shared_info_t *HYPERVISOR_shared_info __read_mostly = (shared_info_t *)XSI_BASE;
    3.16 +shared_info_t *HYPERVISOR_shared_info __read_mostly =
    3.17 +	(shared_info_t *)XSI_BASE;
    3.18  EXPORT_SYMBOL(HYPERVISOR_shared_info);
    3.19  
    3.20  start_info_t *xen_start_info;
    3.21 @@ -60,7 +60,7 @@ xen_setup(char **cmdline_p)
    3.22  
    3.23  	if (ia64_platform_is("xen"))
    3.24  		dig_setup(cmdline_p);
    3.25 -	
    3.26 +
    3.27  	if (!is_running_on_xen() || !is_initial_xendomain())
    3.28  		return;
    3.29  
    3.30 @@ -79,9 +79,11 @@ xen_cpu_init(void)
    3.31  	xen_smp_intr_init();
    3.32  }
    3.33  
    3.34 -//XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
    3.35 -// move those to lib/contiguous_bitmap?
    3.36 -//XXX discontigmem/sparsemem
    3.37 +/*
    3.38 + *XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
    3.39 + * move those to lib/contiguous_bitmap?
    3.40 + *XXX discontigmem/sparsemem
    3.41 + */
    3.42  
    3.43  /*
    3.44   * Bitmap is indexed by page number. If bit is set, the page is part of a
    3.45 @@ -104,16 +106,16 @@ create_contiguous_bitmap(u64 start, u64 
    3.46  	pte_t *pte;
    3.47  
    3.48  	bitmap_start = (unsigned long)contiguous_bitmap +
    3.49 -	               ((__pa(start) >> PAGE_SHIFT) >> 3);
    3.50 +		       ((__pa(start) >> PAGE_SHIFT) >> 3);
    3.51  	bitmap_end = (unsigned long)contiguous_bitmap +
    3.52 -	             (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
    3.53 +		     (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
    3.54  
    3.55  	start_page = bitmap_start & PAGE_MASK;
    3.56  	end_page = PAGE_ALIGN(bitmap_end);
    3.57  	node = paddr_to_nid(__pa(start));
    3.58  
    3.59  	bitmap = alloc_bootmem_pages_node(NODE_DATA(node),
    3.60 -	                                  end_page - start_page);
    3.61 +					  end_page - start_page);
    3.62  	BUG_ON(!bitmap);
    3.63  	memset(bitmap, 0, end_page - start_page);
    3.64  
    3.65 @@ -121,26 +123,26 @@ create_contiguous_bitmap(u64 start, u64 
    3.66  		pgd = pgd_offset_k(address);
    3.67  		if (pgd_none(*pgd))
    3.68  			pgd_populate(&init_mm, pgd,
    3.69 -			             alloc_bootmem_pages_node(NODE_DATA(node),
    3.70 -			                                      PAGE_SIZE));
    3.71 +				     alloc_bootmem_pages_node(NODE_DATA(node),
    3.72 +							      PAGE_SIZE));
    3.73  		pud = pud_offset(pgd, address);
    3.74  
    3.75  		if (pud_none(*pud))
    3.76  			pud_populate(&init_mm, pud,
    3.77 -			             alloc_bootmem_pages_node(NODE_DATA(node),
    3.78 -			                                      PAGE_SIZE));
    3.79 +				     alloc_bootmem_pages_node(NODE_DATA(node),
    3.80 +							      PAGE_SIZE));
    3.81  		pmd = pmd_offset(pud, address);
    3.82  
    3.83  		if (pmd_none(*pmd))
    3.84  			pmd_populate_kernel(&init_mm, pmd,
    3.85 -			                    alloc_bootmem_pages_node
    3.86 -			                    (NODE_DATA(node), PAGE_SIZE));
    3.87 +					    alloc_bootmem_pages_node
    3.88 +					    (NODE_DATA(node), PAGE_SIZE));
    3.89  		pte = pte_offset_kernel(pmd, address);
    3.90  
    3.91  		if (pte_none(*pte))
    3.92  			set_pte(pte,
    3.93 -			        pfn_pte(__pa(bitmap + (address - start_page))
    3.94 -			                >> PAGE_SHIFT, PAGE_KERNEL));
    3.95 +				pfn_pte(__pa(bitmap + (address - start_page))
    3.96 +					>> PAGE_SHIFT, PAGE_KERNEL));
    3.97  	}
    3.98  	return 0;
    3.99  }
   3.100 @@ -225,9 +227,11 @@ static void contiguous_bitmap_clear(
   3.101  	}
   3.102  }
   3.103  
   3.104 -// __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
   3.105 -// are based on i386 xen_create_contiguous_region(),
   3.106 -// xen_destroy_contiguous_region()
   3.107 +/*
   3.108 + * __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
   3.109 + * are based on i386 xen_create_contiguous_region(),
   3.110 + * xen_destroy_contiguous_region()
   3.111 + */
   3.112  
   3.113  /* Protected by balloon_lock. */
   3.114  #define MAX_CONTIG_ORDER 7
   3.115 @@ -273,9 +277,8 @@ int
   3.116  	balloon_lock(flags);
   3.117  
   3.118  	/* Get a new contiguous memory extent. */
   3.119 -	for (i = 0; i < num_gpfn; i++) {
   3.120 +	for (i = 0; i < num_gpfn; i++)
   3.121  		in_frames[i] = start_gpfn + i;
   3.122 -	}
   3.123  	out_frame = start_gpfn;
   3.124  	error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
   3.125  	success = (exchange.nr_exchanged == num_gpfn);
   3.126 @@ -357,7 +360,7 @@ void
   3.127  			 .domid        = DOMID_SELF
   3.128  		 },
   3.129  		.nr_exchanged = 0
   3.130 -        };
   3.131 +	};
   3.132  	
   3.133  
   3.134  	if (!test_bit(start_gpfn, contiguous_bitmap))
   3.135 @@ -375,17 +378,16 @@ void
   3.136  
   3.137  	contiguous_bitmap_clear(start_gpfn, num_gpfn);
   3.138  
   3.139 -        /* Do the exchange for non-contiguous MFNs. */
   3.140 +	/* Do the exchange for non-contiguous MFNs. */
   3.141  	in_frame = start_gpfn;
   3.142 -	for (i = 0; i < num_gpfn; i++) {
   3.143 +	for (i = 0; i < num_gpfn; i++)
   3.144  		out_frames[i] = start_gpfn + i;
   3.145 -	}
   3.146  	error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
   3.147  	success = (exchange.nr_exchanged == 1);
   3.148  	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
   3.149  	BUG_ON(success && (error != 0));
   3.150  	if (unlikely(error == -ENOSYS)) {
   3.151 -                /* Compatibility when XENMEM_exchange is unsupported. */
   3.152 +		/* Compatibility when XENMEM_exchange is unsupported. */
   3.153  		error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
   3.154  					     &exchange.in);
   3.155  		BUG_ON(error != 1);
   3.156 @@ -405,11 +407,10 @@ xen_limit_pages_to_max_mfn(struct page *
   3.157  					    order, address_bits);
   3.158  }
   3.159  
   3.160 -
   3.161 -///////////////////////////////////////////////////////////////////////////
   3.162 -// grant table hack
   3.163 -// cmd: GNTTABOP_xxx
   3.164 -
   3.165 +/****************************************************************************
   3.166 + * grant table hack
   3.167 + * cmd: GNTTABOP_xxx
   3.168 + */
   3.169  #include <linux/mm.h>
   3.170  #include <xen/interface/xen.h>
   3.171  #include <xen/gnttab.h>
   3.172 @@ -428,16 +429,19 @@ gnttab_map_grant_ref_pre(struct gnttab_m
   3.173  
   3.174  	if (flags & GNTMAP_host_map) {
   3.175  		if (flags & GNTMAP_application_map) {
   3.176 -			xprintd("GNTMAP_application_map is not supported yet: flags 0x%x\n", flags);
   3.177 +			xprintd("GNTMAP_application_map is not supported yet:"
   3.178 +				" flags 0x%x\n", flags);
   3.179  			BUG();
   3.180  		}
   3.181  		if (flags & GNTMAP_contains_pte) {
   3.182 -			xprintd("GNTMAP_contains_pte is not supported yet flags 0x%x\n", flags);
   3.183 +			xprintd("GNTMAP_contains_pte is not supported yet"
   3.184 +				" flags 0x%x\n", flags);
   3.185  			BUG();
   3.186  		}
   3.187  	} else if (flags & GNTMAP_device_map) {
   3.188 -		xprintd("GNTMAP_device_map is not supported yet 0x%x\n", flags);
   3.189 -		BUG();//XXX not yet. actually this flag is not used.
   3.190 +		xprintd("GNTMAP_device_map is not supported yet 0x%x\n",
   3.191 +			flags);
   3.192 +		BUG(); /* XXX not yet. actually this flag is not used. */
   3.193  	} else {
   3.194  		BUG();
   3.195  	}
   3.196 @@ -457,15 +461,17 @@ HYPERVISOR_grant_table_op(unsigned int c
   3.197  }
   3.198  EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
   3.199  
   3.200 -///////////////////////////////////////////////////////////////////////////
   3.201 -// foreign mapping
   3.202 +/**************************************************************************
   3.203 + * foreign mapping
   3.204 + */
   3.205  #include <linux/efi.h>
   3.206 -#include <asm/meminit.h> // for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}()
   3.207 +#include <asm/meminit.h> /* for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}() */
   3.208  
   3.209  static unsigned long privcmd_resource_min = 0;
   3.210 -// Xen/ia64 currently can handle pseudo physical address bits up to
   3.211 -// (PAGE_SHIFT * 3)
   3.212 -static unsigned long privcmd_resource_max = GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
   3.213 +/* Xen/ia64 currently can handle pseudo physical address bits up to
   3.214 + * (PAGE_SHIFT * 3) */
   3.215 +static unsigned long privcmd_resource_max =
   3.216 +	GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
   3.217  static unsigned long privcmd_resource_align = IA64_GRANULE_SIZE;
   3.218  
   3.219  static unsigned long
   3.220 @@ -500,18 +506,18 @@ xen_ia64_privcmd_init(void)
   3.221  	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
   3.222  	efi_desc_size = ia64_boot_param->efi_memdesc_size;
   3.223  
   3.224 -	// at first check the used highest address
   3.225 +	/* at first check the used highest address */
   3.226  	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
   3.227 -		// nothing
   3.228 +		/* nothing */;
   3.229  	}
   3.230  	md = p - efi_desc_size;
   3.231  	privcmd_resource_min = GRANULEROUNDUP(md_end_addr(md));
   3.232  	if (xen_ia64_privcmd_check_size(privcmd_resource_min,
   3.233 -					privcmd_resource_max)) {
   3.234 +					privcmd_resource_max))
   3.235  		goto out;
   3.236 -	}
   3.237  
   3.238 -	// the used highest address is too large. try to find the largest gap.
   3.239 +	/* the used highest address is too large.
   3.240 +	 * try to find the largest gap. */
   3.241  	tmp_min = privcmd_resource_max;
   3.242  	tmp_max = 0;
   3.243  	gap_size = 0;
   3.244 @@ -525,23 +531,21 @@ xen_ia64_privcmd_init(void)
   3.245  
   3.246  		md = p;
   3.247  		end = md_end_addr(md);
   3.248 -		if (end > privcmd_resource_max) {
   3.249 +		if (end > privcmd_resource_max)
   3.250  			break;
   3.251 -		}
   3.252  		if (end < prev_end) {
   3.253 -			// work around. 
   3.254 -			// Xen may pass incompletely sorted memory
   3.255 -			// descriptors like
   3.256 -			// [x, x + length]
   3.257 -			// [x, x]
   3.258 -			// this order should be reversed.
   3.259 +			/* work around. 
   3.260 +			 * Xen may pass incompletely sorted memory
   3.261 +			 * descriptors like
   3.262 +			 * [x, x + length]
   3.263 +			 * [x, x]
   3.264 +			 * this order should be reversed. */
   3.265  			continue;
   3.266  		}
   3.267  		next = p + efi_desc_size;
   3.268  		next_start = next->phys_addr;
   3.269 -		if (next_start > privcmd_resource_max) {
   3.270 +		if (next_start > privcmd_resource_max)
   3.271  			next_start = privcmd_resource_max;
   3.272 -		}
   3.273  		if (end < next_start && gap_size < (next_start - end)) {
   3.274  			tmp_min = end;
   3.275  			tmp_max = next_start;
   3.276 @@ -560,19 +564,21 @@ xen_ia64_privcmd_init(void)
   3.277  	privcmd_resource_max = tmp_max;
   3.278  	if (!xen_ia64_privcmd_check_size(privcmd_resource_min,
   3.279  					 privcmd_resource_max)) {
   3.280 -		// Any large enough gap isn't found.
   3.281 -		// go ahead anyway with the warning hoping that large region
   3.282 -		// won't be requested.
   3.283 -		printk(KERN_WARNING "xen privcmd: large enough region for privcmd mmap is not found.\n");
   3.284 +		/* Any large enough gap isn't found.
   3.285 +		 * go ahead anyway with the warning hoping that large region
   3.286 +		 * won't be requested. */
   3.287 +		printk(KERN_WARNING "xen privcmd: "
   3.288 +		       "large enough region for privcmd mmap is not found.\n");
   3.289  	}
   3.290  
   3.291  out:
   3.292 -	printk(KERN_INFO "xen privcmd uses pseudo physical addr range [0x%lx, 0x%lx] (%ldMB)\n",
   3.293 +	printk(KERN_INFO "xen privcmd uses pseudo physical addr range "
   3.294 +	       "[0x%lx, 0x%lx] (%ldMB)\n",
   3.295  	       privcmd_resource_min, privcmd_resource_max, 
   3.296  	       (privcmd_resource_max - privcmd_resource_min) >> 20);
   3.297  	BUG_ON(privcmd_resource_min >= privcmd_resource_max);
   3.298  
   3.299 -	// XXX this should be somewhere appropriate
   3.300 +	/* XXX this should be somewhere appropriate */
   3.301  	(void)p2m_expose_init();
   3.302  
   3.303  	return 0;
   3.304 @@ -587,12 +593,12 @@ struct xen_ia64_privcmd_entry {
   3.305  
   3.306  struct xen_ia64_privcmd_range {
   3.307  	atomic_t			ref_count;
   3.308 -	unsigned long			pgoff; // in PAGE_SIZE
   3.309 -	struct resource*		res;
   3.310 +	unsigned long			pgoff; /* in PAGE_SIZE */
   3.311 +	struct resource			*res;
   3.312  
   3.313 -	// for foreign domain p2m mapping
   3.314 -	void*				private;
   3.315 -	void (*callback)(struct xen_ia64_privcmd_range* range, void* arg);
   3.316 +	/* for foreign domain p2m mapping */
   3.317 +	void				*private;
   3.318 +	void (*callback)(struct xen_ia64_privcmd_range *range, void *arg);
   3.319  
   3.320  	unsigned long			num_entries;
   3.321  	struct xen_ia64_privcmd_entry	entries[0];
   3.322 @@ -600,30 +606,30 @@ struct xen_ia64_privcmd_range {
   3.323  
   3.324  struct xen_ia64_privcmd_vma {
   3.325  	int				is_privcmd_mmapped;
   3.326 -	struct xen_ia64_privcmd_range*	range;
   3.327 +	struct xen_ia64_privcmd_range	*range;
   3.328  
   3.329  	unsigned long			num_entries;
   3.330 -	struct xen_ia64_privcmd_entry*	entries;
   3.331 +	struct xen_ia64_privcmd_entry	*entries;
   3.332  };
   3.333  
   3.334  static void
   3.335 -xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry* entry)
   3.336 +xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry *entry)
   3.337  {
   3.338  	atomic_set(&entry->map_count, 0);
   3.339  	entry->gpfn = INVALID_GPFN;
   3.340  }
   3.341  
   3.342  static int
   3.343 -xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
   3.344 +xen_ia64_privcmd_entry_mmap(struct vm_area_struct *vma,
   3.345  			    unsigned long addr,
   3.346 -			    struct xen_ia64_privcmd_range* privcmd_range,
   3.347 +			    struct xen_ia64_privcmd_range *privcmd_range,
   3.348  			    int i,
   3.349  			    unsigned long gmfn,
   3.350  			    pgprot_t prot,
   3.351  			    domid_t domid)
   3.352  {
   3.353  	int error = 0;
   3.354 -	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
   3.355 +	struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
   3.356  	unsigned long gpfn;
   3.357  	unsigned long flags;
   3.358  
   3.359 @@ -639,21 +645,18 @@ xen_ia64_privcmd_entry_mmap(struct vm_ar
   3.360  	gpfn = (privcmd_range->res->start >> PAGE_SHIFT) + i;
   3.361  
   3.362  	flags = ASSIGN_writable;
   3.363 -	if (pgprot_val(prot) == PROT_READ) {
   3.364 +	if (pgprot_val(prot) == PROT_READ)
   3.365  		flags = ASSIGN_readonly;
   3.366 -	}
   3.367  	error = HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn, flags, domid);
   3.368 -	if (error != 0) {
   3.369 +	if (error != 0)
   3.370  		goto out;
   3.371 -	}
   3.372  
   3.373  	prot = vma->vm_page_prot;
   3.374  	error = remap_pfn_range(vma, addr, gpfn, 1 << PAGE_SHIFT, prot);
   3.375  	if (error != 0) {
   3.376  		error = HYPERVISOR_zap_physmap(gpfn, 0);
   3.377 -		if (error) {
   3.378 -			BUG();//XXX
   3.379 -		}
   3.380 +		if (error)
   3.381 +			BUG(); /* XXX */
   3.382  	} else {
   3.383  		atomic_inc(&entry->map_count);
   3.384  		entry->gpfn = gpfn;
   3.385 @@ -664,47 +667,44 @@ out:
   3.386  }
   3.387  
   3.388  static void
   3.389 -xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range* privcmd_range,
   3.390 +xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range *privcmd_range,
   3.391  			      int i)
   3.392  {
   3.393 -	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
   3.394 +	struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
   3.395  	unsigned long gpfn = entry->gpfn;
   3.396 -	//gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
   3.397 -	//	(vma->vm_pgoff - privcmd_range->pgoff);
   3.398 +	/* gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
   3.399 +  		(vma->vm_pgoff - privcmd_range->pgoff); */
   3.400  	int error;
   3.401  
   3.402  	error = HYPERVISOR_zap_physmap(gpfn, 0);
   3.403 -	if (error) {
   3.404 -		BUG();//XXX
   3.405 -	}
   3.406 +	if (error)
   3.407 +		BUG(); /* XXX */
   3.408  	entry->gpfn = INVALID_GPFN;
   3.409  }
   3.410  
   3.411  static void
   3.412 -xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range* privcmd_range,
   3.413 +xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range *privcmd_range,
   3.414  			    int i)
   3.415  {
   3.416 -	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
   3.417 -	if (entry->gpfn != INVALID_GPFN) {
   3.418 +	struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
   3.419 +	if (entry->gpfn != INVALID_GPFN)
   3.420  		atomic_inc(&entry->map_count);
   3.421 -	} else {
   3.422 +	else
   3.423  		BUG_ON(atomic_read(&entry->map_count) != 0);
   3.424 -	}
   3.425  }
   3.426  
   3.427  static void
   3.428 -xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range* privcmd_range,
   3.429 +xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range *privcmd_range,
   3.430  			     int i)
   3.431  {
   3.432 -	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
   3.433 +	struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
   3.434  	if (entry->gpfn != INVALID_GPFN &&
   3.435 -	    atomic_dec_and_test(&entry->map_count)) {
   3.436 +	    atomic_dec_and_test(&entry->map_count))
   3.437  		xen_ia64_privcmd_entry_munmap(privcmd_range, i);
   3.438 -	}
   3.439  }
   3.440  
   3.441 -static void xen_ia64_privcmd_vma_open(struct vm_area_struct* vma);
   3.442 -static void xen_ia64_privcmd_vma_close(struct vm_area_struct* vma);
   3.443 +static void xen_ia64_privcmd_vma_open(struct vm_area_struct *vma);
   3.444 +static void xen_ia64_privcmd_vma_close(struct vm_area_struct *vma);
   3.445  
   3.446  struct vm_operations_struct xen_ia64_privcmd_vm_ops = {
   3.447  	.open = &xen_ia64_privcmd_vma_open,
   3.448 @@ -712,12 +712,13 @@ struct vm_operations_struct xen_ia64_pri
   3.449  };
   3.450  
   3.451  static void
   3.452 -__xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
   3.453 -			    struct xen_ia64_privcmd_vma* privcmd_vma,
   3.454 -			    struct xen_ia64_privcmd_range* privcmd_range)
   3.455 +__xen_ia64_privcmd_vma_open(struct vm_area_struct *vma,
   3.456 +			    struct xen_ia64_privcmd_vma *privcmd_vma,
   3.457 +			    struct xen_ia64_privcmd_range *privcmd_range)
   3.458  {
   3.459  	unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
   3.460 -	unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
   3.461 +	unsigned long num_entries =
   3.462 +		(vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
   3.463  	unsigned long i;
   3.464  
   3.465  	BUG_ON(entry_offset < 0);
   3.466 @@ -727,36 +728,37 @@ static void
   3.467  	privcmd_vma->num_entries = num_entries;
   3.468  	privcmd_vma->entries = &privcmd_range->entries[entry_offset];
   3.469  	vma->vm_private_data = privcmd_vma;
   3.470 -	for (i = 0; i < privcmd_vma->num_entries; i++) {
   3.471 +	for (i = 0; i < privcmd_vma->num_entries; i++)
   3.472  		xen_ia64_privcmd_entry_open(privcmd_range, entry_offset + i);
   3.473 -	}
   3.474  
   3.475  	vma->vm_private_data = privcmd_vma;
   3.476  	vma->vm_ops = &xen_ia64_privcmd_vm_ops;
   3.477  }
   3.478  
   3.479  static void
   3.480 -xen_ia64_privcmd_vma_open(struct vm_area_struct* vma)
   3.481 +xen_ia64_privcmd_vma_open(struct vm_area_struct *vma)
   3.482  {
   3.483 -	struct xen_ia64_privcmd_vma* old_privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
   3.484 -	struct xen_ia64_privcmd_vma* privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
   3.485 -	struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
   3.486 +	struct xen_ia64_privcmd_vma *old_privcmd_vma =
   3.487 +		(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
   3.488 +	struct xen_ia64_privcmd_vma *privcmd_vma =
   3.489 +		(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
   3.490 +	struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
   3.491  
   3.492  	atomic_inc(&privcmd_range->ref_count);
   3.493 -	// vm_op->open() can't fail.
   3.494 +	/* vm_op->open() can't fail. */
   3.495  	privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);
   3.496 -	// copy original value if necessary
   3.497 +	/* copy original value if necessary */
   3.498  	privcmd_vma->is_privcmd_mmapped = old_privcmd_vma->is_privcmd_mmapped;
   3.499  
   3.500  	__xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
   3.501  }
   3.502  
   3.503  static void
   3.504 -xen_ia64_privcmd_vma_close(struct vm_area_struct* vma)
   3.505 +xen_ia64_privcmd_vma_close(struct vm_area_struct *vma)
   3.506  {
   3.507 -	struct xen_ia64_privcmd_vma* privcmd_vma =
   3.508 +	struct xen_ia64_privcmd_vma *privcmd_vma =
   3.509  		(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
   3.510 -	struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
   3.511 +	struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
   3.512  	unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
   3.513  	unsigned long i;
   3.514  
   3.515 @@ -770,7 +772,7 @@ xen_ia64_privcmd_vma_close(struct vm_are
   3.516  	if (atomic_dec_and_test(&privcmd_range->ref_count)) {
   3.517  #if 1
   3.518  		for (i = 0; i < privcmd_range->num_entries; i++) {
   3.519 -			struct xen_ia64_privcmd_entry* entry =
   3.520 +			struct xen_ia64_privcmd_entry *entry =
   3.521  				&privcmd_range->entries[i];
   3.522  			BUG_ON(atomic_read(&entry->map_count) != 0);
   3.523  			BUG_ON(entry->gpfn != INVALID_GPFN);
   3.524 @@ -788,7 +790,7 @@ xen_ia64_privcmd_vma_close(struct vm_are
   3.525  int
   3.526  privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
   3.527  {
   3.528 -	struct xen_ia64_privcmd_vma* privcmd_vma =
   3.529 +	struct xen_ia64_privcmd_vma *privcmd_vma =
   3.530  		(struct xen_ia64_privcmd_vma *)vma->vm_private_data;
   3.531  	return (xchg(&privcmd_vma->is_privcmd_mmapped, 1) == 0);
   3.532  }
   3.533 @@ -799,9 +801,9 @@ privcmd_mmap(struct file * file, struct 
   3.534  	int error;
   3.535  	unsigned long size = vma->vm_end - vma->vm_start;
   3.536  	unsigned long num_entries = size >> PAGE_SHIFT;
   3.537 -	struct xen_ia64_privcmd_range* privcmd_range = NULL;
   3.538 -	struct xen_ia64_privcmd_vma* privcmd_vma = NULL;
   3.539 -	struct resource* res = NULL;
   3.540 +	struct xen_ia64_privcmd_range *privcmd_range = NULL;
   3.541 +	struct xen_ia64_privcmd_vma *privcmd_vma = NULL;
   3.542 +	struct resource *res = NULL;
   3.543  	unsigned long i;
   3.544  	BUG_ON(!is_running_on_xen());
   3.545  
   3.546 @@ -811,26 +813,22 @@ privcmd_mmap(struct file * file, struct 
   3.547  	privcmd_range =
   3.548  		vmalloc(sizeof(*privcmd_range) +
   3.549  			sizeof(privcmd_range->entries[0]) * num_entries);
   3.550 -	if (privcmd_range == NULL) {
   3.551 +	if (privcmd_range == NULL)
   3.552  		goto out_enomem0;
   3.553 -	}
   3.554  	privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
   3.555 -	if (privcmd_vma == NULL) {
   3.556 +	if (privcmd_vma == NULL)
   3.557  		goto out_enomem1;
   3.558 -	}
   3.559  	privcmd_vma->is_privcmd_mmapped = 0;
   3.560  
   3.561  	res = kzalloc(sizeof(*res), GFP_KERNEL);
   3.562 -	if (res == NULL) {
   3.563 +	if (res == NULL)
   3.564  		goto out_enomem1;
   3.565 -	}
   3.566  	res->name = "Xen privcmd mmap";
   3.567  	error = allocate_resource(&iomem_resource, res, size,
   3.568  				  privcmd_resource_min, privcmd_resource_max,
   3.569  				  privcmd_resource_align, NULL, NULL);
   3.570 -	if (error) {
   3.571 +	if (error)
   3.572  		goto out_enomem1;
   3.573 -	}
   3.574  	privcmd_range->res = res;
   3.575  
   3.576  	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
   3.577 @@ -841,9 +839,8 @@ privcmd_mmap(struct file * file, struct 
   3.578  	privcmd_range->num_entries = num_entries;
   3.579  	privcmd_range->private = NULL;
   3.580  	privcmd_range->callback = NULL;
   3.581 -	for (i = 0; i < privcmd_range->num_entries; i++) {
   3.582 +	for (i = 0; i < privcmd_range->num_entries; i++)
   3.583  		xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
   3.584 -	}
   3.585  
   3.586  	__xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
   3.587  	return 0;
   3.588 @@ -858,15 +855,15 @@ out_enomem0:
   3.589  
   3.590  int
   3.591  direct_remap_pfn_range(struct vm_area_struct *vma,
   3.592 -		       unsigned long address,	// process virtual address
   3.593 -		       unsigned long gmfn,	// gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE
   3.594 +		       unsigned long address,	/* process virtual address */
   3.595 +		       unsigned long gmfn,	/* gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE */
   3.596  		       unsigned long size,
   3.597  		       pgprot_t prot,
   3.598 -		       domid_t  domid)		// target domain
   3.599 +		       domid_t  domid)		/* target domain */
   3.600  {
   3.601 -	struct xen_ia64_privcmd_vma* privcmd_vma =
   3.602 +	struct xen_ia64_privcmd_vma *privcmd_vma =
   3.603  		(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
   3.604 -	struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
   3.605 +	struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
   3.606  	unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
   3.607  
   3.608  	unsigned long i;
   3.609 @@ -875,28 +872,27 @@ direct_remap_pfn_range(struct vm_area_st
   3.610  	BUG_ON(!is_running_on_xen());
   3.611  
   3.612  #if 0
   3.613 -	if (prot != vm->vm_page_prot) {
   3.614 +	if (prot != vm->vm_page_prot)
   3.615  		return -EINVAL;
   3.616 -	}
   3.617  #endif
   3.618  
   3.619  	i = (address - vma->vm_start) >> PAGE_SHIFT;
   3.620  	for (offset = 0; offset < size; offset += PAGE_SIZE) {
   3.621  		error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, privcmd_range, entry_offset + i, gmfn, prot, domid);
   3.622 -		if (error != 0) {
   3.623 +		if (error != 0)
   3.624  			break;
   3.625 -		}
   3.626  
   3.627  		i++;
   3.628  		gmfn++;
   3.629 -        }
   3.630 +	}
   3.631  
   3.632  	return error;
   3.633  }
   3.634  
   3.635  
   3.636 -///////////////////////////////////////////////////////////////////////////
   3.637 -// expose p2m table
   3.638 +/**************************************************************************
   3.639 + * expose p2m table
   3.640 + */
   3.641  #ifdef CONFIG_XEN_IA64_EXPOSE_P2M
   3.642  #include <linux/cpu.h>
   3.643  #include <asm/uaccess.h>
   3.644 @@ -914,9 +910,10 @@ static struct resource p2m_resource = {
   3.645  };
   3.646  static unsigned long p2m_assign_start_pfn __read_mostly;
   3.647  static unsigned long p2m_assign_end_pfn __read_mostly;
   3.648 -static unsigned long p2m_expose_size;	// this is referenced only when resume.
   3.649 -					// so __read_mostly doesn't make sense.
   3.650 -volatile const pte_t* p2m_pte __read_mostly;
   3.651 +static unsigned long p2m_expose_size;	/* this is referenced only when resume.
   3.652 +					 * so __read_mostly doesn't make sense.
   3.653 +					 */
   3.654 +volatile const pte_t *p2m_pte __read_mostly;
   3.655  
   3.656  #define GRANULE_PFN	PTRS_PER_PTE
   3.657  static unsigned long p2m_granule_pfn __read_mostly = GRANULE_PFN;
   3.658 @@ -929,13 +926,13 @@ static unsigned long p2m_granule_pfn __r
   3.659  static int xen_ia64_p2m_expose __read_mostly = 1;
   3.660  module_param(xen_ia64_p2m_expose, int, 0);
   3.661  MODULE_PARM_DESC(xen_ia64_p2m_expose,
   3.662 -                 "enable/disable xen/ia64 p2m exposure optimization\n");
   3.663 +		 "enable/disable xen/ia64 p2m exposure optimization\n");
   3.664  
   3.665  #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
   3.666  static int xen_ia64_p2m_expose_use_dtr __read_mostly = 1;
   3.667  module_param(xen_ia64_p2m_expose_use_dtr, int, 0);
   3.668  MODULE_PARM_DESC(xen_ia64_p2m_expose_use_dtr,
   3.669 -                 "use/unuse dtr to map exposed p2m table\n");
   3.670 +		 "use/unuse dtr to map exposed p2m table\n");
   3.671  
   3.672  static const int p2m_page_shifts[] = {
   3.673  	_PAGE_SIZE_4K,
   3.674 @@ -957,21 +954,21 @@ struct p2m_itr_arg {
   3.675  };
   3.676  static struct p2m_itr_arg p2m_itr_arg __read_mostly;
   3.677  
   3.678 -// This should be in asm-ia64/kregs.h
   3.679 +/* This should be in asm-ia64/kregs.h */
   3.680  #define IA64_TR_P2M_TABLE	3
   3.681  
   3.682  static void
   3.683 -p2m_itr(void* info)
   3.684 +p2m_itr(void *info)
   3.685  {
   3.686 -	struct p2m_itr_arg* arg = (struct p2m_itr_arg*)info;
   3.687 +	struct p2m_itr_arg *arg = (struct p2m_itr_arg*)info;
   3.688  	ia64_itr(0x2, IA64_TR_P2M_TABLE,
   3.689 -	         arg->vaddr, arg->pteval, arg->log_page_size);
   3.690 +		 arg->vaddr, arg->pteval, arg->log_page_size);
   3.691  	ia64_srlz_d();
   3.692  }
   3.693  
   3.694  static int
   3.695  p2m_expose_dtr_call(struct notifier_block *self,
   3.696 -                    unsigned long event, void* ptr)
   3.697 +		    unsigned long event, void *ptr)
   3.698  {
   3.699  	unsigned int cpu = (unsigned int)(long)ptr;
   3.700  	if (event != CPU_ONLINE)
   3.701 @@ -1050,15 +1047,16 @@ p2m_expose_init(void)
   3.702  				continue;
   3.703  
   3.704  			granule_pfn = max(page_size >> PAGE_SHIFT,
   3.705 -			                  p2m_granule_pfn);
   3.706 +					  p2m_granule_pfn);
   3.707  			p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
   3.708 -			                                granule_pfn);
   3.709 +							granule_pfn);
   3.710  			p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
   3.711 -			                              granule_pfn);
   3.712 +						      granule_pfn);
   3.713  			num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
   3.714  			p2m_expose_size = num_pfn << PAGE_SHIFT;
   3.715  			p2m_size = p2m_table_size(num_pfn);
   3.716 -			p2m_size = ROUNDUP(p2m_size, granule_pfn << PAGE_SHIFT);
   3.717 +			p2m_size = ROUNDUP(p2m_size,
   3.718 +					   granule_pfn << PAGE_SHIFT);
   3.719  			if (p2m_size == page_size)
   3.720  				break;
   3.721  		}
   3.722 @@ -1073,20 +1071,21 @@ p2m_expose_init(void)
   3.723  	{
   3.724  		BUG_ON(p2m_granule_pfn & (p2m_granule_pfn - 1));
   3.725  		p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
   3.726 -		                                p2m_granule_pfn);
   3.727 -		p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn, p2m_granule_pfn);
   3.728 +						p2m_granule_pfn);
   3.729 +		p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
   3.730 +					      p2m_granule_pfn);
   3.731  		num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
   3.732  		p2m_expose_size = num_pfn << PAGE_SHIFT;
   3.733  		p2m_size = p2m_table_size(num_pfn);
   3.734  		p2m_size = ROUNDUP(p2m_size, p2m_granule_pfn << PAGE_SHIFT);
   3.735  		align = max(privcmd_resource_align,
   3.736 -		            p2m_granule_pfn << PAGE_SHIFT);
   3.737 +			    p2m_granule_pfn << PAGE_SHIFT);
   3.738  	}
   3.739  	
   3.740 -	// use privcmd region
   3.741 +	/* use privcmd region */
   3.742  	error = allocate_resource(&iomem_resource, &p2m_resource, p2m_size,
   3.743 -	                          privcmd_resource_min, privcmd_resource_max,
   3.744 -	                          align, NULL, NULL);
   3.745 +				  privcmd_resource_min, privcmd_resource_max,
   3.746 +				  align, NULL, NULL);
   3.747  	if (error) {
   3.748  		printk(KERN_ERR P2M_PREFIX
   3.749  		       "can't allocate region for p2m exposure "
   3.750 @@ -1099,8 +1098,8 @@ p2m_expose_init(void)
   3.751  	p2m_assign_end_pfn = p2m_resource.end >> PAGE_SHIFT;
   3.752  	
   3.753  	error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
   3.754 -	                              p2m_assign_start_pfn,
   3.755 -	                              p2m_expose_size, p2m_granule_pfn);
   3.756 +				      p2m_assign_start_pfn,
   3.757 +				      p2m_expose_size, p2m_granule_pfn);
   3.758  	if (error) {
   3.759  		printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
   3.760  		       error);
   3.761 @@ -1115,9 +1114,9 @@ p2m_expose_init(void)
   3.762  #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
   3.763  	if (xen_ia64_p2m_expose_use_dtr) {
   3.764  		p2m_itr_arg.vaddr = (unsigned long)__va(p2m_assign_start_pfn
   3.765 -		                                        << PAGE_SHIFT);
   3.766 +							<< PAGE_SHIFT);
   3.767  		p2m_itr_arg.pteval = pte_val(pfn_pte(p2m_assign_start_pfn,
   3.768 -		                                     PAGE_KERNEL));
   3.769 +						     PAGE_KERNEL));
   3.770  		p2m_itr_arg.log_page_size = log_page_size;
   3.771  		smp_mb();
   3.772  		smp_call_function(&p2m_itr, &p2m_itr_arg, 1, 1);
   3.773 @@ -1165,8 +1164,8 @@ p2m_expose_resume(void)
   3.774  	 * interrupts are masked when resume.
   3.775  	 */
   3.776  	error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
   3.777 -	                              p2m_assign_start_pfn,
   3.778 -	                              p2m_expose_size, p2m_granule_pfn);
   3.779 +				      p2m_assign_start_pfn,
   3.780 +				      p2m_expose_size, p2m_granule_pfn);
   3.781  	if (error) {
   3.782  		printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
   3.783  		       error);
   3.784 @@ -1193,11 +1192,11 @@ p2m_expose_resume(void)
   3.785  	}
   3.786  }
   3.787  
   3.788 -//XXX inlinize?
   3.789 +/* XXX inlinize? */
   3.790  unsigned long
   3.791  p2m_phystomach(unsigned long gpfn)
   3.792  {
   3.793 -	volatile const pte_t* pte;
   3.794 +	volatile const pte_t *pte;
   3.795  	unsigned long mfn;
   3.796  	unsigned long pteval;
   3.797  	
   3.798 @@ -1209,8 +1208,8 @@ p2m_phystomach(unsigned long gpfn)
   3.799  
   3.800  	mfn = INVALID_MFN;
   3.801  	if (likely(__get_user(pteval, (unsigned long __user *)pte) == 0 &&
   3.802 -	           pte_present(__pte(pteval)) &&
   3.803 -	           pte_pfn(__pte(pteval)) != (INVALID_MFN >> PAGE_SHIFT)))
   3.804 +		   pte_present(__pte(pteval)) &&
   3.805 +		   pte_pfn(__pte(pteval)) != (INVALID_MFN >> PAGE_SHIFT)))
   3.806  		mfn = (pteval & _PFN_MASK) >> PAGE_SHIFT;
   3.807  
   3.808  	return mfn;
   3.809 @@ -1224,8 +1223,9 @@ EXPORT_SYMBOL_GPL(p2m_convert_max_pfn);
   3.810  EXPORT_SYMBOL_GPL(p2m_pte);
   3.811  EXPORT_SYMBOL_GPL(p2m_phystomach);
   3.812  
   3.813 -///////////////////////////////////////////////////////////////////////////
   3.814 -// foreign domain p2m mapping
   3.815 +/**************************************************************************
   3.816 + * foreign domain p2m mapping
   3.817 + */
   3.818  #include <asm/xen/xencomm.h>
   3.819  #include <xen/public/privcmd.h>
   3.820  
   3.821 @@ -1235,10 +1235,10 @@ struct foreign_p2m_private {
   3.822  };
   3.823  
   3.824  static void
   3.825 -xen_foreign_p2m_unexpose(struct xen_ia64_privcmd_range* privcmd_range,
   3.826 -			 void* arg)
   3.827 +xen_foreign_p2m_unexpose(struct xen_ia64_privcmd_range *privcmd_range,
   3.828 +			 void *arg)
   3.829  {
   3.830 -	struct foreign_p2m_private* private = (struct foreign_p2m_private*)arg;
   3.831 +	struct foreign_p2m_private *private = (struct foreign_p2m_private*)arg;
   3.832  	int ret;
   3.833  
   3.834  	privcmd_range->private = NULL;
   3.835 @@ -1252,17 +1252,19 @@ xen_foreign_p2m_unexpose(struct xen_ia64
   3.836  }
   3.837  
   3.838  int
   3.839 -xen_foreign_p2m_expose(privcmd_hypercall_t* hypercall)
   3.840 +xen_foreign_p2m_expose(privcmd_hypercall_t *hypercall)
   3.841  {
   3.842 -	// hypercall->
   3.843 -	// arg0: cmd = IA64_DOM0VP_expose_foreign_p2m
   3.844 -	// arg1: va
   3.845 -	// arg2: domid
   3.846 -	// arg3: __user* memmap_info
   3.847 -	// arg4: flags
   3.848 +	/*
   3.849 +	 * hypercall->
   3.850 +	 * arg0: cmd = IA64_DOM0VP_expose_foreign_p2m
   3.851 +	 * arg1: va
   3.852 +	 * arg2: domid
   3.853 +	 * arg3: __user* memmap_info
   3.854 +	 * arg4: flags
   3.855 +	 */
   3.856  
   3.857  	int ret = 0;
   3.858 -	struct mm_struct* mm = current->mm;
   3.859 +	struct mm_struct *mm = current->mm;
   3.860  
   3.861  	unsigned long vaddr = hypercall->arg[1];
   3.862  	domid_t domid = hypercall->arg[2];
   3.863 @@ -1271,19 +1273,19 @@ xen_foreign_p2m_expose(privcmd_hypercall
   3.864  
   3.865  	struct xen_ia64_memmap_info memmap_info;
   3.866  	size_t memmap_size;
   3.867 -	struct xen_ia64_memmap_info* k_memmap_info = NULL;
   3.868 +	struct xen_ia64_memmap_info *k_memmap_info = NULL;
   3.869  	unsigned long max_gpfn;
   3.870  	unsigned long p2m_size;
   3.871 -	struct resource* res;
   3.872 +	struct resource *res;
   3.873  	unsigned long gpfn;
   3.874  
   3.875 -	struct vm_area_struct* vma;
   3.876 -	void* p;
   3.877 +	struct vm_area_struct *vma;
   3.878 +	void *p;
   3.879  	unsigned long prev_src_gpfn_end;
   3.880  
   3.881 -	struct xen_ia64_privcmd_vma* privcmd_vma;
   3.882 -	struct xen_ia64_privcmd_range* privcmd_range;
   3.883 -	struct foreign_p2m_private* private = NULL;
   3.884 +	struct xen_ia64_privcmd_vma *privcmd_vma;
   3.885 +	struct xen_ia64_privcmd_range *privcmd_range;
   3.886 +	struct foreign_p2m_private *private = NULL;
   3.887  
   3.888  	BUG_ON(hypercall->arg[0] != IA64_DOM0VP_expose_foreign_p2m);
   3.889  
   3.890 @@ -1338,12 +1340,14 @@ xen_foreign_p2m_expose(privcmd_hypercall
   3.891  	}
   3.892  	
   3.893  	gpfn = res->start >> PAGE_SHIFT;
   3.894 -	// arg0: dest_gpfn
   3.895 -	// arg1: domid
   3.896 -	// arg2: XEN_GUEST_HANDLE(char) buffer: memmap_info
   3.897 -	// arg3: flags
   3.898 -	// The hypercall checks its intergirty/simplfies it and 
   3.899 -	// copy it back for us.
   3.900 +	/*
   3.901 +	 * arg0: dest_gpfn
   3.902 +	 * arg1: domid
   3.903 +	 * arg2: XEN_GUEST_HANDLE(char) buffer: memmap_info
   3.904 +	 * arg3: flags
    3.905 +	 * The hypercall checks its integrity/simplifies it and
    3.906 +	 * copies it back for us.
   3.907 +	 */
   3.908  	ret = xencomm_arch_expose_foreign_p2m(gpfn, domid,
   3.909  	      xencomm_map_no_alloc(k_memmap_info, memmap_size),
   3.910  	      hypercall->arg[4]);
   3.911 @@ -1385,7 +1389,7 @@ xen_foreign_p2m_expose(privcmd_hypercall
   3.912  				      vma->vm_page_prot);
   3.913  		if (ret) {
   3.914  			for (i = 0; i < gpfn + gpfn_offset; i++) {
   3.915 -				struct xen_ia64_privcmd_entry* entry =
   3.916 +				struct xen_ia64_privcmd_entry *entry =
   3.917  					&privcmd_range->entries[i];
   3.918  				BUG_ON(atomic_read(&entry->map_count) != 1 &&
   3.919  				       atomic_read(&entry->map_count) != 0);
   3.920 @@ -1399,7 +1403,7 @@ xen_foreign_p2m_expose(privcmd_hypercall
   3.921  		for (i = gpfn_offset;
   3.922  		     i < gpfn_offset + (size >> PAGE_SHIFT);
   3.923  		     i++) {
   3.924 -			struct xen_ia64_privcmd_entry* entry =
   3.925 +			struct xen_ia64_privcmd_entry *entry =
   3.926  				&privcmd_range->entries[i];
   3.927  			BUG_ON(atomic_read(&entry->map_count) != 0);
   3.928  			BUG_ON(entry->gpfn != INVALID_GPFN);
   3.929 @@ -1424,13 +1428,13 @@ kfree_out:
   3.930  }
   3.931  #endif
   3.932  
   3.933 -///////////////////////////////////////////////////////////////////////////
   3.934 -// for xenoprof
   3.935 -
   3.936 +/**************************************************************************
   3.937 + * for xenoprof
   3.938 + */
   3.939  struct resource*
   3.940  xen_ia64_allocate_resource(unsigned long size)
   3.941  {
   3.942 -	struct resource* res;
   3.943 +	struct resource *res;
   3.944  	int error;
   3.945  	
   3.946  	res = kzalloc(sizeof(*res), GFP_KERNEL);
   3.947 @@ -1440,8 +1444,8 @@ xen_ia64_allocate_resource(unsigned long
   3.948  	res->name = "Xen";
   3.949  	res->flags = IORESOURCE_MEM;
   3.950  	error = allocate_resource(&iomem_resource, res, PAGE_ALIGN(size),
   3.951 -	                          privcmd_resource_min, privcmd_resource_max,
   3.952 -	                          IA64_GRANULE_SIZE, NULL, NULL);
   3.953 +				  privcmd_resource_min, privcmd_resource_max,
   3.954 +				  IA64_GRANULE_SIZE, NULL, NULL);
   3.955  	if (error) {
   3.956  		kfree(res);
   3.957  		return ERR_PTR(error);
   3.958 @@ -1451,7 +1455,7 @@ xen_ia64_allocate_resource(unsigned long
   3.959  EXPORT_SYMBOL_GPL(xen_ia64_allocate_resource);
   3.960  
   3.961  void
   3.962 -xen_ia64_release_resource(struct resource* res)
   3.963 +xen_ia64_release_resource(struct resource *res)
   3.964  {
   3.965  	release_resource(res);
   3.966  	kfree(res);
   3.967 @@ -1459,7 +1463,7 @@ xen_ia64_release_resource(struct resourc
   3.968  EXPORT_SYMBOL_GPL(xen_ia64_release_resource);
   3.969  
   3.970  void
   3.971 -xen_ia64_unmap_resource(struct resource* res)
   3.972 +xen_ia64_unmap_resource(struct resource *res)
   3.973  {
   3.974  	unsigned long gpfn = res->start >> PAGE_SHIFT;
   3.975  	unsigned long nr_pages = (res->end - res->start) >> PAGE_SHIFT;
   3.976 @@ -1476,8 +1480,9 @@ xen_ia64_unmap_resource(struct resource*
   3.977  }
   3.978  EXPORT_SYMBOL_GPL(xen_ia64_unmap_resource);
   3.979  
   3.980 -///////////////////////////////////////////////////////////////////////////
   3.981 -// opt feature
   3.982 +/**************************************************************************
   3.983 + * opt feature
   3.984 + */
   3.985  void
   3.986  xen_ia64_enable_opt_feature(void)
   3.987  {
   3.988 @@ -1491,8 +1496,9 @@ xen_ia64_enable_opt_feature(void)
   3.989  	HYPERVISOR_opt_feature(&optf);
   3.990  }
   3.991  
   3.992 -///////////////////////////////////////////////////////////////////////////
   3.993 -// suspend/resume
   3.994 +/**************************************************************************
   3.995 + * suspend/resume
   3.996 + */
   3.997  void
   3.998  xen_post_suspend(int suspend_cancelled)
   3.999  {
     4.1 --- a/arch/ia64/xen/util.c	Fri Jan 18 16:52:25 2008 +0000
     4.2 +++ b/arch/ia64/xen/util.c	Fri Jan 18 14:20:59 2008 -0700
     4.3 @@ -35,25 +35,23 @@ struct vm_struct *alloc_vm_area(unsigned
     4.4  	int order;
     4.5  	unsigned long virt;
     4.6  	unsigned long nr_pages;
     4.7 -	struct vm_struct* area;
     4.8 -	
     4.9 +	struct vm_struct *area;
    4.10 +
    4.11  	order = get_order(size);
    4.12  	virt = __get_free_pages(GFP_KERNEL, order);
    4.13 -	if (virt == 0) {
    4.14 +	if (virt == 0)
    4.15  		goto err0;
    4.16 -	}
    4.17  	nr_pages = 1 << order;
    4.18  	scrub_pages(virt, nr_pages);
    4.19 -	
    4.20 +
    4.21  	area = kmalloc(sizeof(*area), GFP_KERNEL);
    4.22 -	if (area == NULL) {
    4.23 +	if (area == NULL)
    4.24  		goto err1;
    4.25 -	}
    4.26 -	
    4.27 -        area->flags = VM_IOREMAP;//XXX
    4.28 +
    4.29 +        area->flags = VM_IOREMAP; /* XXX */
    4.30          area->addr = (void*)virt;
    4.31          area->size = size;
    4.32 -        area->pages = NULL; //XXX
    4.33 +        area->pages = NULL; /* XXX */
    4.34          area->nr_pages = nr_pages;
    4.35          area->phys_addr = 0; 	/* xenbus_map_ring_valloc uses this field!  */
    4.36  
    4.37 @@ -63,7 +61,6 @@ err1:
    4.38  	free_pages(virt, order);
    4.39  err0:
    4.40  	return NULL;
    4.41 -	
    4.42  }
    4.43  EXPORT_SYMBOL_GPL(alloc_vm_area);
    4.44  
    4.45 @@ -73,8 +70,8 @@ void free_vm_area(struct vm_struct *area
    4.46  	unsigned long i;
    4.47  	unsigned long phys_addr = __pa(area->addr);
    4.48  
    4.49 -	// This area is used for foreign page mappping.
    4.50 -	// So underlying machine page may not be assigned.
     4.51 +	/* This area is used for foreign page mapping.
    4.52 +	 * So underlying machine page may not be assigned. */
    4.53  	for (i = 0; i < (1 << order); i++) {
    4.54  		unsigned long ret;
    4.55  		unsigned long gpfn = (phys_addr >> PAGE_SHIFT) + i;
     5.1 --- a/arch/ia64/xen/xcom_privcmd.c	Fri Jan 18 16:52:25 2008 +0000
     5.2 +++ b/arch/ia64/xen/xcom_privcmd.c	Fri Jan 18 14:20:59 2008 -0700
     5.3 @@ -120,8 +120,8 @@ xencomm_privcmd_sysctl(privcmd_hypercall
     5.4  			.interface_version = XEN_SYSCTL_INTERFACE_VERSION,
     5.5  			.u.perfc_op = {
     5.6  				.cmd = XEN_SYSCTL_PERFCOP_query,
     5.7 -				// .desc.p = NULL,
     5.8 -				// .val.p = NULL,
     5.9 +				/* .desc.p = NULL, */
    5.10 +				/* .val.p = NULL, */
    5.11  			},
    5.12  		};
    5.13  
     6.1 --- a/include/asm-ia64/hypervisor.h	Fri Jan 18 16:52:25 2008 +0000
     6.2 +++ b/include/asm-ia64/hypervisor.h	Fri Jan 18 14:20:59 2008 -0700
     6.3 @@ -117,7 +117,7 @@ HYPERVISOR_poll(
     6.4  }
     6.5  
     6.6  #ifndef CONFIG_VMX_GUEST
     6.7 -// for drivers/xen/privcmd/privcmd.c
     6.8 +/* for drivers/xen/privcmd/privcmd.c */
     6.9  #define machine_to_phys_mapping 0
    6.10  struct vm_area_struct;
    6.11  int direct_remap_pfn_range(struct vm_area_struct *vma,
    6.12 @@ -131,7 +131,7 @@ int privcmd_enforce_singleshot_mapping(s
    6.13  int privcmd_mmap(struct file * file, struct vm_area_struct * vma);
    6.14  #define HAVE_ARCH_PRIVCMD_MMAP
    6.15  
    6.16 -// for drivers/xen/balloon/balloon.c
    6.17 +/* for drivers/xen/balloon/balloon.c */
    6.18  #ifdef CONFIG_XEN_SCRUB_PAGES
    6.19  #define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
    6.20  #else
    6.21 @@ -178,8 +178,8 @@ void xen_ia64_enable_opt_feature(void);
    6.22  #define __pte_ma(_x)	((pte_t) {(_x)})        /* unmodified use */
    6.23  #define pfn_pte_ma(_x,_y)	__pte_ma(0)     /* unmodified use */
    6.24  
    6.25 -// for netfront.c, netback.c
    6.26 -#define MULTI_UVMFLAGS_INDEX 0 //XXX any value
    6.27 +/* for netfront.c, netback.c */
    6.28 +#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */
    6.29  
    6.30  static inline void
    6.31  MULTI_update_va_mapping(
    6.32 @@ -216,7 +216,7 @@ MULTI_grant_table_op(multicall_entry_t *
    6.33  		(-ENOSYS);						\
    6.34  	})
    6.35  
    6.36 -// for debug
    6.37 +/* for debug */
    6.38  asmlinkage int xprintk(const char *fmt, ...);
    6.39  #define xprintd(fmt, ...)	xprintk("%s:%d " fmt, __func__, __LINE__, \
    6.40  					##__VA_ARGS__)
     7.1 --- a/include/asm-ia64/maddr.h	Fri Jan 18 16:52:25 2008 +0000
     7.2 +++ b/include/asm-ia64/maddr.h	Fri Jan 18 14:20:59 2008 -0700
     7.3 @@ -31,8 +31,8 @@ pfn_to_mfn_for_dma(unsigned long pfn)
     7.4  	if (p2m_initialized)
     7.5  		return p2m_phystomach(pfn);
     7.6  	mfn = HYPERVISOR_phystomach(pfn);
     7.7 -	BUG_ON(mfn == 0); // XXX
     7.8 -	BUG_ON(mfn == INVALID_P2M_ENTRY); // XXX
     7.9 +	BUG_ON(mfn == 0); /* XXX */
    7.10 +	BUG_ON(mfn == INVALID_P2M_ENTRY); /* XXX */
    7.11  	BUG_ON(mfn == INVALID_MFN);
    7.12  	return mfn;
    7.13  }
    7.14 @@ -52,7 +52,7 @@ mfn_to_pfn_for_dma(unsigned long mfn)
    7.15  	unsigned long pfn;
    7.16  	pfn = HYPERVISOR_machtophys(mfn);
    7.17  	BUG_ON(pfn == 0);
    7.18 -	//BUG_ON(pfn == INVALID_M2P_ENTRY);
    7.19 +	/* BUG_ON(pfn == INVALID_M2P_ENTRY); */
    7.20  	return pfn;
    7.21  }
    7.22  
    7.23 @@ -98,11 +98,11 @@ mfn_to_local_pfn(unsigned long mfn)
    7.24  
    7.25  #define mfn_to_virt(mfn) (__va((mfn) << PAGE_SHIFT))
    7.26  #define virt_to_mfn(virt) (__pa(virt) >> PAGE_SHIFT)
    7.27 -#define virt_to_machine(virt) __pa(virt) // for tpmfront.c
    7.28 +#define virt_to_machine(virt) __pa(virt) /* for tpmfront.c */
    7.29  
    7.30  #define set_phys_to_machine(pfn, mfn) do { } while (0)
    7.31  
    7.32 -typedef unsigned long maddr_t;	// to compile netback, netfront
    7.33 +typedef unsigned long maddr_t;	/* to compile netback, netfront */
    7.34  #ifndef _ASM_IA64_SN_TYPES_H /* paddr_t is defined in asm-ia64/sn/types.h */
    7.35  typedef unsigned long paddr_t;
    7.36  #endif
     8.1 --- a/include/asm-ia64/xenoprof.h	Fri Jan 18 16:52:25 2008 +0000
     8.2 +++ b/include/asm-ia64/xenoprof.h	Fri Jan 18 14:20:59 2008 -0700
     8.3 @@ -36,13 +36,13 @@ struct xenoprof_arch_shared_buffer {
     8.4  };
     8.5  
     8.6  struct xenoprof_shared_buffer;
     8.7 -void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf);
     8.8 +void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer *sbuf);
     8.9  struct xenoprof_get_buffer;
    8.10 -int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer,
    8.11 -                                    struct xenoprof_shared_buffer* sbuf);
    8.12 +int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer *get_buffer,
    8.13 +                                    struct xenoprof_shared_buffer *sbuf);
    8.14  struct xenoprof_passive;
    8.15 -int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain,
    8.16 -                              struct xenoprof_shared_buffer* sbuf);
    8.17 +int xenoprof_arch_set_passive(struct xenoprof_passive *pdomain,
    8.18 +                              struct xenoprof_shared_buffer *sbuf);
    8.19  
    8.20  #endif /* CONFIG_XEN */
    8.21  #endif /* __ASM_XENOPROF_H__ */