xenbits.xensource.com Git - xenclient/kernel.git/commitdiff
[IA64] Coding style fix
author Alex Williamson <alex.williamson@hp.com>
Fri, 18 Jan 2008 21:20:59 +0000 (14:20 -0700)
committer Alex Williamson <alex.williamson@hp.com>
Fri, 18 Jan 2008 21:20:59 +0000 (14:20 -0700)
Mainly whitespace, // comments and * ops.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
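
The conversions are mechanical; a hypothetical before/after fragment (not a
hunk from this patch) illustrates all three kinds of fix — // comments to
/* */, pointer declarations to the `type *name' form, and space indentation
to tabs:

-//XXX move this to a header file
-struct resource* res;
-        mov r14=r32
+/* XXX move this to a header file */
+struct resource *res;
+	mov r14=r32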
arch/ia64/oprofile/xenoprof.c
arch/ia64/xen/hypercall.S
arch/ia64/xen/hypervisor.c
arch/ia64/xen/util.c
arch/ia64/xen/xcom_privcmd.c
include/asm-ia64/hypervisor.h
include/asm-ia64/maddr.h
include/asm-ia64/xenoprof.h

index 998be3e66bffcf52fde5d53081ce122ae280f4f1,998be3e66bffcf52fde5d53081ce122ae280f4f1..b0dfe9d2ae662700ef28365b5bd8a4ebe544d744
@@@ -51,9 -51,9 +51,9 @@@ void xenoprof_arch_stop(void
  }
  
  /* XXX move them to an appropriate header file. */
--struct resource* xen_ia64_allocate_resource(unsigned long size); 
--void xen_ia64_release_resource(struct resource* res); 
--void xen_ia64_unmap_resource(struct resource* res); 
++struct resource* xen_ia64_allocate_resource(unsigned long size);
++void xen_ia64_release_resource(struct resource *res);
++void xen_ia64_unmap_resource(struct resource *res);
  
  struct resource*
  xenoprof_ia64_allocate_resource(int32_t max_samples)
@@@ -73,7 -73,7 +73,7 @@@
        return xen_ia64_allocate_resource(bufsize);
  }
  
--void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf)
++void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer *sbuf)
  {
        if (sbuf->buffer) {
                xen_ia64_unmap_resource(sbuf->arch.res);
        }
  }
  
--int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer,
--                                    struct xenoprof_shared_buffer* sbuf)
++int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer *get_buffer,
++                                    struct xenoprof_shared_buffer *sbuf)
  {
        int ret;
--      struct resource* res;
++      struct resource *res;
  
        sbuf->buffer = NULL;
        sbuf->arch.res = NULL;
        return ret;
  }
  
--int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain,
--                              struct xenoprof_shared_buffer* sbuf)
++int xenoprof_arch_set_passive(struct xenoprof_passive *pdomain,
++                              struct xenoprof_shared_buffer *sbuf)
  {
        int ret;
--      struct resource* res;
++      struct resource *res;
  
        sbuf->buffer = NULL;
        sbuf->arch.res = NULL;
index 3b067b5ef58fbb86af08e37469273d0738dc0474,3b067b5ef58fbb86af08e37469273d0738dc0474..f9752551be6402506c5f834dcb8cc2e4f24eca9c
@@@ -17,7 -17,7 +17,7 @@@
  GLOBAL_ENTRY(xen_get_psr)
        XEN_HYPER_GET_PSR
        br.ret.sptk.many rp
--    ;;
++      ;;
  END(xen_get_psr)
  
  GLOBAL_ENTRY(xen_get_ivr)
@@@ -124,13 -124,13 +124,13 @@@ END(xen_set_eflag
  #endif /* ASM_SUPPORTED */
  
  GLOBAL_ENTRY(xen_send_ipi)
--        mov r14=r32
--        mov r15=r33
--        mov r2=0x400
--        break 0x1000
--        ;;
--        br.ret.sptk.many rp
--        ;;
++      mov r14=r32
++      mov r15=r33
++      mov r2=0x400
++      break 0x1000
++      ;;
++      br.ret.sptk.many rp
++      ;;
  END(xen_send_ipi)
  
  GLOBAL_ENTRY(__hypercall)
index fb03ff52522325174546d5802b94637f4649ed96,fb03ff52522325174546d5802b94637f4649ed96..349caec5ab44c623231523b3088227446a198d2c
@@@ -20,7 -20,7 +20,6 @@@
   *
   */
  
--//#include <linux/kernel.h>
  #include <linux/spinlock.h>
  #include <linux/bootmem.h>
  #include <linux/module.h>
@@@ -35,7 -35,7 +34,8 @@@
  #include <xen/xencons.h>
  #include <xen/balloon.h>
  
--shared_info_t *HYPERVISOR_shared_info __read_mostly = (shared_info_t *)XSI_BASE;
++shared_info_t *HYPERVISOR_shared_info __read_mostly =
++      (shared_info_t *)XSI_BASE;
  EXPORT_SYMBOL(HYPERVISOR_shared_info);
  
  start_info_t *xen_start_info;
@@@ -60,7 -60,7 +60,7 @@@ xen_setup(char **cmdline_p
  
        if (ia64_platform_is("xen"))
                dig_setup(cmdline_p);
--      
++
        if (!is_running_on_xen() || !is_initial_xendomain())
                return;
  
@@@ -79,9 -79,9 +79,11 @@@ xen_cpu_init(void
        xen_smp_intr_init();
  }
  
--//XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
--// move those to lib/contiguous_bitmap?
--//XXX discontigmem/sparsemem
++/*
++ *XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
++ * move those to lib/contiguous_bitmap?
++ *XXX discontigmem/sparsemem
++ */
  
  /*
   * Bitmap is indexed by page number. If bit is set, the page is part of a
@@@ -104,16 -104,16 +106,16 @@@ create_contiguous_bitmap(u64 start, u6
        pte_t *pte;
  
        bitmap_start = (unsigned long)contiguous_bitmap +
--                     ((__pa(start) >> PAGE_SHIFT) >> 3);
++                     ((__pa(start) >> PAGE_SHIFT) >> 3);
        bitmap_end = (unsigned long)contiguous_bitmap +
--                   (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
++                   (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
  
        start_page = bitmap_start & PAGE_MASK;
        end_page = PAGE_ALIGN(bitmap_end);
        node = paddr_to_nid(__pa(start));
  
        bitmap = alloc_bootmem_pages_node(NODE_DATA(node),
--                                        end_page - start_page);
++                                        end_page - start_page);
        BUG_ON(!bitmap);
        memset(bitmap, 0, end_page - start_page);
  
                pgd = pgd_offset_k(address);
                if (pgd_none(*pgd))
                        pgd_populate(&init_mm, pgd,
--                                   alloc_bootmem_pages_node(NODE_DATA(node),
--                                                            PAGE_SIZE));
++                                   alloc_bootmem_pages_node(NODE_DATA(node),
++                                                            PAGE_SIZE));
                pud = pud_offset(pgd, address);
  
                if (pud_none(*pud))
                        pud_populate(&init_mm, pud,
--                                   alloc_bootmem_pages_node(NODE_DATA(node),
--                                                            PAGE_SIZE));
++                                   alloc_bootmem_pages_node(NODE_DATA(node),
++                                                            PAGE_SIZE));
                pmd = pmd_offset(pud, address);
  
                if (pmd_none(*pmd))
                        pmd_populate_kernel(&init_mm, pmd,
--                                          alloc_bootmem_pages_node
--                                          (NODE_DATA(node), PAGE_SIZE));
++                                          alloc_bootmem_pages_node
++                                          (NODE_DATA(node), PAGE_SIZE));
                pte = pte_offset_kernel(pmd, address);
  
                if (pte_none(*pte))
                        set_pte(pte,
--                              pfn_pte(__pa(bitmap + (address - start_page))
--                                      >> PAGE_SHIFT, PAGE_KERNEL));
++                              pfn_pte(__pa(bitmap + (address - start_page))
++                                      >> PAGE_SHIFT, PAGE_KERNEL));
        }
        return 0;
  }
@@@ -225,9 -225,9 +227,11 @@@ static void contiguous_bitmap_clear
        }
  }
  
--// __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
--// are based on i386 xen_create_contiguous_region(),
--// xen_destroy_contiguous_region()
++/*
++ * __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
++ * are based on i386 xen_create_contiguous_region(),
++ * xen_destroy_contiguous_region()
++ */
  
  /* Protected by balloon_lock. */
  #define MAX_CONTIG_ORDER 7
@@@ -273,9 -273,9 +277,8 @@@ __xen_create_contiguous_region(unsigne
        balloon_lock(flags);
  
        /* Get a new contiguous memory extent. */
--      for (i = 0; i < num_gpfn; i++) {
++      for (i = 0; i < num_gpfn; i++)
                in_frames[i] = start_gpfn + i;
--      }
        out_frame = start_gpfn;
        error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
        success = (exchange.nr_exchanged == num_gpfn);
@@@ -357,7 -357,7 +360,7 @@@ __xen_destroy_contiguous_region(unsigne
                         .domid        = DOMID_SELF
                 },
                .nr_exchanged = 0
--        };
++      };
        
  
        if (!test_bit(start_gpfn, contiguous_bitmap))
  
        contiguous_bitmap_clear(start_gpfn, num_gpfn);
  
--        /* Do the exchange for non-contiguous MFNs. */
++      /* Do the exchange for non-contiguous MFNs. */
        in_frame = start_gpfn;
--      for (i = 0; i < num_gpfn; i++) {
++      for (i = 0; i < num_gpfn; i++)
                out_frames[i] = start_gpfn + i;
--      }
        error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
        success = (exchange.nr_exchanged == 1);
        BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
        BUG_ON(success && (error != 0));
        if (unlikely(error == -ENOSYS)) {
--                /* Compatibility when XENMEM_exchange is unsupported. */
++              /* Compatibility when XENMEM_exchange is unsupported. */
                error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                             &exchange.in);
                BUG_ON(error != 1);
@@@ -405,11 -405,11 +407,10 @@@ xen_limit_pages_to_max_mfn(struct page 
                                            order, address_bits);
  }
  
--
--///////////////////////////////////////////////////////////////////////////
--// grant table hack
--// cmd: GNTTABOP_xxx
--
++/****************************************************************************
++ * grant table hack
++ * cmd: GNTTABOP_xxx
++ */
  #include <linux/mm.h>
  #include <xen/interface/xen.h>
  #include <xen/gnttab.h>
@@@ -428,16 -428,16 +429,19 @@@ gnttab_map_grant_ref_pre(struct gnttab_
  
        if (flags & GNTMAP_host_map) {
                if (flags & GNTMAP_application_map) {
--                      xprintd("GNTMAP_application_map is not supported yet: flags 0x%x\n", flags);
++                      xprintd("GNTMAP_application_map is not supported yet:"
++                              " flags 0x%x\n", flags);
                        BUG();
                }
                if (flags & GNTMAP_contains_pte) {
--                      xprintd("GNTMAP_contains_pte is not supported yet flags 0x%x\n", flags);
++                      xprintd("GNTMAP_contains_pte is not supported yet"
++                              " flags 0x%x\n", flags);
                        BUG();
                }
        } else if (flags & GNTMAP_device_map) {
--              xprintd("GNTMAP_device_map is not supported yet 0x%x\n", flags);
--              BUG();//XXX not yet. actually this flag is not used.
++              xprintd("GNTMAP_device_map is not supported yet 0x%x\n",
++                      flags);
++              BUG(); /* XXX not yet. actually this flag is not used. */
        } else {
                BUG();
        }
@@@ -457,15 -457,15 +461,17 @@@ HYPERVISOR_grant_table_op(unsigned int 
  }
  EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
  
--///////////////////////////////////////////////////////////////////////////
--// foreign mapping
++/**************************************************************************
++ * foreign mapping
++ */
  #include <linux/efi.h>
--#include <asm/meminit.h> // for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}()
++#include <asm/meminit.h> /* for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}() */
  
  static unsigned long privcmd_resource_min = 0;
--// Xen/ia64 currently can handle pseudo physical address bits up to
--// (PAGE_SHIFT * 3)
--static unsigned long privcmd_resource_max = GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
++/* Xen/ia64 currently can handle pseudo physical address bits up to
++ * (PAGE_SHIFT * 3) */
++static unsigned long privcmd_resource_max =
++      GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
  static unsigned long privcmd_resource_align = IA64_GRANULE_SIZE;
  
  static unsigned long
@@@ -500,18 -500,18 +506,18 @@@ xen_ia64_privcmd_init(void
        efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;
  
--      // at first check the used highest address
++      /* at first check the used highest address */
        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
--              // nothing
++              /* nothing */;
        }
        md = p - efi_desc_size;
        privcmd_resource_min = GRANULEROUNDUP(md_end_addr(md));
        if (xen_ia64_privcmd_check_size(privcmd_resource_min,
--                                      privcmd_resource_max)) {
++                                      privcmd_resource_max))
                goto out;
--      }
  
--      // the used highest address is too large. try to find the largest gap.
++      /* the used highest address is too large.
++       * try to find the largest gap. */
        tmp_min = privcmd_resource_max;
        tmp_max = 0;
        gap_size = 0;
  
                md = p;
                end = md_end_addr(md);
--              if (end > privcmd_resource_max) {
++              if (end > privcmd_resource_max)
                        break;
--              }
                if (end < prev_end) {
--                      // work around. 
--                      // Xen may pass incompletely sorted memory
--                      // descriptors like
--                      // [x, x + length]
--                      // [x, x]
--                      // this order should be reversed.
++                      /* work around. 
++                       * Xen may pass incompletely sorted memory
++                       * descriptors like
++                       * [x, x + length]
++                       * [x, x]
++                       * this order should be reversed. */
                        continue;
                }
                next = p + efi_desc_size;
                next_start = next->phys_addr;
--              if (next_start > privcmd_resource_max) {
++              if (next_start > privcmd_resource_max)
                        next_start = privcmd_resource_max;
--              }
                if (end < next_start && gap_size < (next_start - end)) {
                        tmp_min = end;
                        tmp_max = next_start;
        privcmd_resource_max = tmp_max;
        if (!xen_ia64_privcmd_check_size(privcmd_resource_min,
                                         privcmd_resource_max)) {
--              // Any large enough gap isn't found.
--              // go ahead anyway with the warning hoping that large region
--              // won't be requested.
--              printk(KERN_WARNING "xen privcmd: large enough region for privcmd mmap is not found.\n");
++              /* Any large enough gap isn't found.
++               * go ahead anyway with the warning hoping that large region
++               * won't be requested. */
++              printk(KERN_WARNING "xen privcmd: "
++                     "large enough region for privcmd mmap is not found.\n");
        }
  
  out:
--      printk(KERN_INFO "xen privcmd uses pseudo physical addr range [0x%lx, 0x%lx] (%ldMB)\n",
++      printk(KERN_INFO "xen privcmd uses pseudo physical addr range "
++             "[0x%lx, 0x%lx] (%ldMB)\n",
               privcmd_resource_min, privcmd_resource_max, 
               (privcmd_resource_max - privcmd_resource_min) >> 20);
        BUG_ON(privcmd_resource_min >= privcmd_resource_max);
  
--      // XXX this should be somewhere appropriate
++      /* XXX this should be somewhere appropriate */
        (void)p2m_expose_init();
  
        return 0;
@@@ -587,12 -587,12 +593,12 @@@ struct xen_ia64_privcmd_entry 
  
  struct xen_ia64_privcmd_range {
        atomic_t                        ref_count;
--      unsigned long                   pgoff; // in PAGE_SIZE
--      struct resource*                res;
++      unsigned long                   pgoff; /* in PAGE_SIZE */
++      struct resource                 *res;
  
--      // for foreign domain p2m mapping
--      void*                           private;
--      void (*callback)(struct xen_ia64_privcmd_range* range, void* arg);
++      /* for foreign domain p2m mapping */
++      void                            *private;
++      void (*callback)(struct xen_ia64_privcmd_range *range, void *arg);
  
        unsigned long                   num_entries;
        struct xen_ia64_privcmd_entry   entries[0];
  
  struct xen_ia64_privcmd_vma {
        int                             is_privcmd_mmapped;
--      struct xen_ia64_privcmd_range*  range;
++      struct xen_ia64_privcmd_range   *range;
  
        unsigned long                   num_entries;
--      struct xen_ia64_privcmd_entry*  entries;
++      struct xen_ia64_privcmd_entry   *entries;
  };
  
  static void
--xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry* entry)
++xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry *entry)
  {
        atomic_set(&entry->map_count, 0);
        entry->gpfn = INVALID_GPFN;
  }
  
  static int
--xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
++xen_ia64_privcmd_entry_mmap(struct vm_area_struct *vma,
                            unsigned long addr,
--                          struct xen_ia64_privcmd_range* privcmd_range,
++                          struct xen_ia64_privcmd_range *privcmd_range,
                            int i,
                            unsigned long gmfn,
                            pgprot_t prot,
                            domid_t domid)
  {
        int error = 0;
--      struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
++      struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
        unsigned long gpfn;
        unsigned long flags;
  
        gpfn = (privcmd_range->res->start >> PAGE_SHIFT) + i;
  
        flags = ASSIGN_writable;
--      if (pgprot_val(prot) == PROT_READ) {
++      if (pgprot_val(prot) == PROT_READ)
                flags = ASSIGN_readonly;
--      }
        error = HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn, flags, domid);
--      if (error != 0) {
++      if (error != 0)
                goto out;
--      }
  
        prot = vma->vm_page_prot;
        error = remap_pfn_range(vma, addr, gpfn, 1 << PAGE_SHIFT, prot);
        if (error != 0) {
                error = HYPERVISOR_zap_physmap(gpfn, 0);
--              if (error) {
--                      BUG();//XXX
--              }
++              if (error)
++                      BUG(); /* XXX */
        } else {
                atomic_inc(&entry->map_count);
                entry->gpfn = gpfn;
@@@ -664,47 -664,47 +667,44 @@@ out
  }
  
  static void
--xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range* privcmd_range,
++xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range *privcmd_range,
                              int i)
  {
--      struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
++      struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
        unsigned long gpfn = entry->gpfn;
--      //gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
--      //      (vma->vm_pgoff - privcmd_range->pgoff);
++      /* gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
++              (vma->vm_pgoff - privcmd_range->pgoff); */
        int error;
  
        error = HYPERVISOR_zap_physmap(gpfn, 0);
--      if (error) {
--              BUG();//XXX
--      }
++      if (error)
++              BUG(); /* XXX */
        entry->gpfn = INVALID_GPFN;
  }
  
  static void
--xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range* privcmd_range,
++xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range *privcmd_range,
                            int i)
  {
--      struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
--      if (entry->gpfn != INVALID_GPFN) {
++      struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
++      if (entry->gpfn != INVALID_GPFN)
                atomic_inc(&entry->map_count);
--      } else {
++      else
                BUG_ON(atomic_read(&entry->map_count) != 0);
--      }
  }
  
  static void
--xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range* privcmd_range,
++xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range *privcmd_range,
                             int i)
  {
--      struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
++      struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
        if (entry->gpfn != INVALID_GPFN &&
--          atomic_dec_and_test(&entry->map_count)) {
++          atomic_dec_and_test(&entry->map_count))
                xen_ia64_privcmd_entry_munmap(privcmd_range, i);
--      }
  }
  
--static void xen_ia64_privcmd_vma_open(struct vm_area_struct* vma);
--static void xen_ia64_privcmd_vma_close(struct vm_area_struct* vma);
++static void xen_ia64_privcmd_vma_open(struct vm_area_struct *vma);
++static void xen_ia64_privcmd_vma_close(struct vm_area_struct *vma);
  
  struct vm_operations_struct xen_ia64_privcmd_vm_ops = {
        .open = &xen_ia64_privcmd_vma_open,
  };
  
  static void
--__xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
--                          struct xen_ia64_privcmd_vma* privcmd_vma,
--                          struct xen_ia64_privcmd_range* privcmd_range)
++__xen_ia64_privcmd_vma_open(struct vm_area_struct *vma,
++                          struct xen_ia64_privcmd_vma *privcmd_vma,
++                          struct xen_ia64_privcmd_range *privcmd_range)
  {
        unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
--      unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++      unsigned long num_entries =
++              (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long i;
  
        BUG_ON(entry_offset < 0);
        privcmd_vma->num_entries = num_entries;
        privcmd_vma->entries = &privcmd_range->entries[entry_offset];
        vma->vm_private_data = privcmd_vma;
--      for (i = 0; i < privcmd_vma->num_entries; i++) {
++      for (i = 0; i < privcmd_vma->num_entries; i++)
                xen_ia64_privcmd_entry_open(privcmd_range, entry_offset + i);
--      }
  
        vma->vm_private_data = privcmd_vma;
        vma->vm_ops = &xen_ia64_privcmd_vm_ops;
  }
  
  static void
--xen_ia64_privcmd_vma_open(struct vm_area_struct* vma)
++xen_ia64_privcmd_vma_open(struct vm_area_struct *vma)
  {
--      struct xen_ia64_privcmd_vma* old_privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
--      struct xen_ia64_privcmd_vma* privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
--      struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
++      struct xen_ia64_privcmd_vma *old_privcmd_vma =
++              (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
++      struct xen_ia64_privcmd_vma *privcmd_vma =
++              (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
++      struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
  
        atomic_inc(&privcmd_range->ref_count);
--      // vm_op->open() can't fail.
++      /* vm_op->open() can't fail. */
        privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);
--      // copy original value if necessary
++      /* copy original value if necessary */
        privcmd_vma->is_privcmd_mmapped = old_privcmd_vma->is_privcmd_mmapped;
  
        __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
  }
  
  static void
--xen_ia64_privcmd_vma_close(struct vm_area_struct* vma)
++xen_ia64_privcmd_vma_close(struct vm_area_struct *vma)
  {
--      struct xen_ia64_privcmd_vma* privcmd_vma =
++      struct xen_ia64_privcmd_vma *privcmd_vma =
                (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
--      struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
++      struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
        unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
        unsigned long i;
  
        if (atomic_dec_and_test(&privcmd_range->ref_count)) {
  #if 1
                for (i = 0; i < privcmd_range->num_entries; i++) {
--                      struct xen_ia64_privcmd_entry* entry =
++                      struct xen_ia64_privcmd_entry *entry =
                                &privcmd_range->entries[i];
                        BUG_ON(atomic_read(&entry->map_count) != 0);
                        BUG_ON(entry->gpfn != INVALID_GPFN);
  int
  privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
  {
--      struct xen_ia64_privcmd_vma* privcmd_vma =
++      struct xen_ia64_privcmd_vma *privcmd_vma =
                (struct xen_ia64_privcmd_vma *)vma->vm_private_data;
        return (xchg(&privcmd_vma->is_privcmd_mmapped, 1) == 0);
  }
@@@ -799,9 -799,9 +801,9 @@@ privcmd_mmap(struct file * file, struc
        int error;
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long num_entries = size >> PAGE_SHIFT;
--      struct xen_ia64_privcmd_range* privcmd_range = NULL;
--      struct xen_ia64_privcmd_vma* privcmd_vma = NULL;
--      struct resource* res = NULL;
++      struct xen_ia64_privcmd_range *privcmd_range = NULL;
++      struct xen_ia64_privcmd_vma *privcmd_vma = NULL;
++      struct resource *res = NULL;
        unsigned long i;
        BUG_ON(!is_running_on_xen());
  
        privcmd_range =
                vmalloc(sizeof(*privcmd_range) +
                        sizeof(privcmd_range->entries[0]) * num_entries);
--      if (privcmd_range == NULL) {
++      if (privcmd_range == NULL)
                goto out_enomem0;
--      }
        privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
--      if (privcmd_vma == NULL) {
++      if (privcmd_vma == NULL)
                goto out_enomem1;
--      }
        privcmd_vma->is_privcmd_mmapped = 0;
  
        res = kzalloc(sizeof(*res), GFP_KERNEL);
--      if (res == NULL) {
++      if (res == NULL)
                goto out_enomem1;
--      }
        res->name = "Xen privcmd mmap";
        error = allocate_resource(&iomem_resource, res, size,
                                  privcmd_resource_min, privcmd_resource_max,
                                  privcmd_resource_align, NULL, NULL);
--      if (error) {
++      if (error)
                goto out_enomem1;
--      }
        privcmd_range->res = res;
  
        /* DONTCOPY is essential for Xen as copy_page_range is broken. */
        privcmd_range->num_entries = num_entries;
        privcmd_range->private = NULL;
        privcmd_range->callback = NULL;
--      for (i = 0; i < privcmd_range->num_entries; i++) {
++      for (i = 0; i < privcmd_range->num_entries; i++)
                xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
--      }
  
        __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
        return 0;
@@@ -858,15 -858,15 +855,15 @@@ out_enomem0
  
  int
  direct_remap_pfn_range(struct vm_area_struct *vma,
--                     unsigned long address,   // process virtual address
--                     unsigned long gmfn,      // gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE
++                     unsigned long address,   /* process virtual address */
++                     unsigned long gmfn,      /* gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE */
                       unsigned long size,
                       pgprot_t prot,
--                     domid_t  domid)          // target domain
++                     domid_t  domid)          /* target domain */
  {
--      struct xen_ia64_privcmd_vma* privcmd_vma =
++      struct xen_ia64_privcmd_vma *privcmd_vma =
                (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
--      struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
++      struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
        unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
  
        unsigned long i;
        BUG_ON(!is_running_on_xen());
  
  #if 0
--      if (prot != vm->vm_page_prot) {
++      if (prot != vm->vm_page_prot)
                return -EINVAL;
--      }
  #endif
  
        i = (address - vma->vm_start) >> PAGE_SHIFT;
        for (offset = 0; offset < size; offset += PAGE_SIZE) {
                error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, privcmd_range, entry_offset + i, gmfn, prot, domid);
--              if (error != 0) {
++              if (error != 0)
                        break;
--              }
  
                i++;
                gmfn++;
--        }
++      }
  
        return error;
  }
  
  
--///////////////////////////////////////////////////////////////////////////
--// expose p2m table
++/**************************************************************************
++ * expose p2m table
++ */
  #ifdef CONFIG_XEN_IA64_EXPOSE_P2M
  #include <linux/cpu.h>
  #include <asm/uaccess.h>
@@@ -914,9 -914,9 +910,10 @@@ static struct resource p2m_resource = 
  };
  static unsigned long p2m_assign_start_pfn __read_mostly;
  static unsigned long p2m_assign_end_pfn __read_mostly;
--static unsigned long p2m_expose_size; // this is referenced only when resume.
--                                      // so __read_mostly doesn't make sense.
--volatile const pte_t* p2m_pte __read_mostly;
++static unsigned long p2m_expose_size; /* this is referenced only when resume.
++                                       * so __read_mostly doesn't make sense.
++                                       */
++volatile const pte_t *p2m_pte __read_mostly;
  
  #define GRANULE_PFN   PTRS_PER_PTE
  static unsigned long p2m_granule_pfn __read_mostly = GRANULE_PFN;
  static int xen_ia64_p2m_expose __read_mostly = 1;
  module_param(xen_ia64_p2m_expose, int, 0);
  MODULE_PARM_DESC(xen_ia64_p2m_expose,
--                 "enable/disable xen/ia64 p2m exposure optimization\n");
++               "enable/disable xen/ia64 p2m exposure optimization\n");
  
  #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
  static int xen_ia64_p2m_expose_use_dtr __read_mostly = 1;
  module_param(xen_ia64_p2m_expose_use_dtr, int, 0);
  MODULE_PARM_DESC(xen_ia64_p2m_expose_use_dtr,
--                 "use/unuse dtr to map exposed p2m table\n");
++               "use/unuse dtr to map exposed p2m table\n");
  
  static const int p2m_page_shifts[] = {
        _PAGE_SIZE_4K,
@@@ -957,21 -957,21 +954,21 @@@ struct p2m_itr_arg 
  };
  static struct p2m_itr_arg p2m_itr_arg __read_mostly;
  
--// This should be in asm-ia64/kregs.h
++/* This should be in asm-ia64/kregs.h */
  #define IA64_TR_P2M_TABLE     3
  
  static void
--p2m_itr(void* info)
++p2m_itr(void *info)
  {
--      struct p2m_itr_arg* arg = (struct p2m_itr_arg*)info;
++      struct p2m_itr_arg *arg = (struct p2m_itr_arg*)info;
        ia64_itr(0x2, IA64_TR_P2M_TABLE,
--               arg->vaddr, arg->pteval, arg->log_page_size);
++               arg->vaddr, arg->pteval, arg->log_page_size);
        ia64_srlz_d();
  }
  
  static int
  p2m_expose_dtr_call(struct notifier_block *self,
--                    unsigned long event, void* ptr)
++                  unsigned long event, void *ptr)
  {
        unsigned int cpu = (unsigned int)(long)ptr;
        if (event != CPU_ONLINE)
@@@ -1050,15 -1050,15 +1047,16 @@@ p2m_expose_init(void
                                continue;
  
                        granule_pfn = max(page_size >> PAGE_SHIFT,
--                                        p2m_granule_pfn);
++                                        p2m_granule_pfn);
                        p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
--                                                      granule_pfn);
++                                                      granule_pfn);
                        p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
--                                                    granule_pfn);
++                                                    granule_pfn);
                        num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
                        p2m_expose_size = num_pfn << PAGE_SHIFT;
                        p2m_size = p2m_table_size(num_pfn);
--                      p2m_size = ROUNDUP(p2m_size, granule_pfn << PAGE_SHIFT);
++                      p2m_size = ROUNDUP(p2m_size,
++                                         granule_pfn << PAGE_SHIFT);
                        if (p2m_size == page_size)
                                break;
                }
        {
                BUG_ON(p2m_granule_pfn & (p2m_granule_pfn - 1));
                p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
--                                              p2m_granule_pfn);
--              p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn, p2m_granule_pfn);
++                                              p2m_granule_pfn);
++              p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
++                                            p2m_granule_pfn);
                num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
                p2m_expose_size = num_pfn << PAGE_SHIFT;
                p2m_size = p2m_table_size(num_pfn);
                p2m_size = ROUNDUP(p2m_size, p2m_granule_pfn << PAGE_SHIFT);
                align = max(privcmd_resource_align,
--                          p2m_granule_pfn << PAGE_SHIFT);
++                          p2m_granule_pfn << PAGE_SHIFT);
        }
        
--      // use privcmd region
++      /* use privcmd region */
        error = allocate_resource(&iomem_resource, &p2m_resource, p2m_size,
--                                privcmd_resource_min, privcmd_resource_max,
--                                align, NULL, NULL);
++                                privcmd_resource_min, privcmd_resource_max,
++                                align, NULL, NULL);
        if (error) {
                printk(KERN_ERR P2M_PREFIX
                       "can't allocate region for p2m exposure "
        p2m_assign_end_pfn = p2m_resource.end >> PAGE_SHIFT;
        
        error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
--                                    p2m_assign_start_pfn,
--                                    p2m_expose_size, p2m_granule_pfn);
++                                    p2m_assign_start_pfn,
++                                    p2m_expose_size, p2m_granule_pfn);
        if (error) {
                printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
                       error);
  #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
        if (xen_ia64_p2m_expose_use_dtr) {
                p2m_itr_arg.vaddr = (unsigned long)__va(p2m_assign_start_pfn
--                                                      << PAGE_SHIFT);
++                                                      << PAGE_SHIFT);
                p2m_itr_arg.pteval = pte_val(pfn_pte(p2m_assign_start_pfn,
--                                                   PAGE_KERNEL));
++                                                   PAGE_KERNEL));
                p2m_itr_arg.log_page_size = log_page_size;
                smp_mb();
                smp_call_function(&p2m_itr, &p2m_itr_arg, 1, 1);
@@@ -1165,8 -1165,8 +1164,8 @@@ p2m_expose_resume(void
         * interrupts are masked when resume.
         */
        error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
--                                    p2m_assign_start_pfn,
--                                    p2m_expose_size, p2m_granule_pfn);
++                                    p2m_assign_start_pfn,
++                                    p2m_expose_size, p2m_granule_pfn);
        if (error) {
                printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
                       error);
        }
  }
  
--//XXX inlinize?
++/* XXX inlinize? */
  unsigned long
  p2m_phystomach(unsigned long gpfn)
  {
--      volatile const pte_t* pte;
++      volatile const pte_t *pte;
        unsigned long mfn;
        unsigned long pteval;
        
  
        mfn = INVALID_MFN;
        if (likely(__get_user(pteval, (unsigned long __user *)pte) == 0 &&
--                 pte_present(__pte(pteval)) &&
--                 pte_pfn(__pte(pteval)) != (INVALID_MFN >> PAGE_SHIFT)))
++                 pte_present(__pte(pteval)) &&
++                 pte_pfn(__pte(pteval)) != (INVALID_MFN >> PAGE_SHIFT)))
                mfn = (pteval & _PFN_MASK) >> PAGE_SHIFT;
  
        return mfn;
@@@ -1224,8 -1224,8 +1223,9 @@@ EXPORT_SYMBOL_GPL(p2m_convert_max_pfn)
  EXPORT_SYMBOL_GPL(p2m_pte);
  EXPORT_SYMBOL_GPL(p2m_phystomach);
  
--///////////////////////////////////////////////////////////////////////////
--// foreign domain p2m mapping
++/**************************************************************************
++ * foreign domain p2m mapping
++ */
  #include <asm/xen/xencomm.h>
  #include <xen/public/privcmd.h>
  
@@@ -1235,10 -1235,10 +1235,10 @@@ struct foreign_p2m_private 
  };
  
  static void
--xen_foreign_p2m_unexpose(struct xen_ia64_privcmd_range* privcmd_range,
--                       void* arg)
++xen_foreign_p2m_unexpose(struct xen_ia64_privcmd_range *privcmd_range,
++                       void *arg)
  {
--      struct foreign_p2m_private* private = (struct foreign_p2m_private*)arg;
++      struct foreign_p2m_private *private = (struct foreign_p2m_private*)arg;
        int ret;
  
        privcmd_range->private = NULL;
  }
  
  int
--xen_foreign_p2m_expose(privcmd_hypercall_t* hypercall)
++xen_foreign_p2m_expose(privcmd_hypercall_t *hypercall)
  {
--      // hypercall->
--      // arg0: cmd = IA64_DOM0VP_expose_foreign_p2m
--      // arg1: va
--      // arg2: domid
--      // arg3: __user* memmap_info
--      // arg4: flags
++      /*
++       * hypercall->
++       * arg0: cmd = IA64_DOM0VP_expose_foreign_p2m
++       * arg1: va
++       * arg2: domid
++       * arg3: __user* memmap_info
++       * arg4: flags
++       */
  
        int ret = 0;
--      struct mm_struct* mm = current->mm;
++      struct mm_struct *mm = current->mm;
  
        unsigned long vaddr = hypercall->arg[1];
        domid_t domid = hypercall->arg[2];
  
        struct xen_ia64_memmap_info memmap_info;
        size_t memmap_size;
--      struct xen_ia64_memmap_info* k_memmap_info = NULL;
++      struct xen_ia64_memmap_info *k_memmap_info = NULL;
        unsigned long max_gpfn;
        unsigned long p2m_size;
--      struct resource* res;
++      struct resource *res;
        unsigned long gpfn;
  
--      struct vm_area_struct* vma;
--      void* p;
++      struct vm_area_struct *vma;
++      void *p;
        unsigned long prev_src_gpfn_end;
  
--      struct xen_ia64_privcmd_vma* privcmd_vma;
--      struct xen_ia64_privcmd_range* privcmd_range;
--      struct foreign_p2m_private* private = NULL;
++      struct xen_ia64_privcmd_vma *privcmd_vma;
++      struct xen_ia64_privcmd_range *privcmd_range;
++      struct foreign_p2m_private *private = NULL;
  
        BUG_ON(hypercall->arg[0] != IA64_DOM0VP_expose_foreign_p2m);
  
        }
        
        gpfn = res->start >> PAGE_SHIFT;
--      // arg0: dest_gpfn
--      // arg1: domid
--      // arg2: XEN_GUEST_HANDLE(char) buffer: memmap_info
--      // arg3: flags
--      // The hypercall checks its intergirty/simplfies it and 
--      // copy it back for us.
++      /*
++       * arg0: dest_gpfn
++       * arg1: domid
++       * arg2: XEN_GUEST_HANDLE(char) buffer: memmap_info
++       * arg3: flags
++       * The hypercall checks its intergirty/simplfies it and 
++       * copy it back for us.
++       */
        ret = xencomm_arch_expose_foreign_p2m(gpfn, domid,
              xencomm_map_no_alloc(k_memmap_info, memmap_size),
              hypercall->arg[4]);
                                      vma->vm_page_prot);
                if (ret) {
                        for (i = 0; i < gpfn + gpfn_offset; i++) {
--                              struct xen_ia64_privcmd_entry* entry =
++                              struct xen_ia64_privcmd_entry *entry =
                                        &privcmd_range->entries[i];
                                BUG_ON(atomic_read(&entry->map_count) != 1 &&
                                       atomic_read(&entry->map_count) != 0);
                for (i = gpfn_offset;
                     i < gpfn_offset + (size >> PAGE_SHIFT);
                     i++) {
--                      struct xen_ia64_privcmd_entry* entry =
++                      struct xen_ia64_privcmd_entry *entry =
                                &privcmd_range->entries[i];
                        BUG_ON(atomic_read(&entry->map_count) != 0);
                        BUG_ON(entry->gpfn != INVALID_GPFN);
@@@ -1424,13 -1424,13 +1428,13 @@@ kfree_out
  }
  #endif
  
--///////////////////////////////////////////////////////////////////////////
--// for xenoprof
--
++/**************************************************************************
++ * for xenoprof
++ */
  struct resource*
  xen_ia64_allocate_resource(unsigned long size)
  {
--      struct resource* res;
++      struct resource *res;
        int error;
        
        res = kzalloc(sizeof(*res), GFP_KERNEL);
        res->name = "Xen";
        res->flags = IORESOURCE_MEM;
        error = allocate_resource(&iomem_resource, res, PAGE_ALIGN(size),
--                                privcmd_resource_min, privcmd_resource_max,
--                                IA64_GRANULE_SIZE, NULL, NULL);
++                                privcmd_resource_min, privcmd_resource_max,
++                                IA64_GRANULE_SIZE, NULL, NULL);
        if (error) {
                kfree(res);
                return ERR_PTR(error);
  EXPORT_SYMBOL_GPL(xen_ia64_allocate_resource);
  
  void
--xen_ia64_release_resource(struct resource* res)
++xen_ia64_release_resource(struct resource *res)
  {
        release_resource(res);
        kfree(res);
  EXPORT_SYMBOL_GPL(xen_ia64_release_resource);
  
  void
--xen_ia64_unmap_resource(struct resource* res)
++xen_ia64_unmap_resource(struct resource *res)
  {
        unsigned long gpfn = res->start >> PAGE_SHIFT;
        unsigned long nr_pages = (res->end - res->start) >> PAGE_SHIFT;
  }
  EXPORT_SYMBOL_GPL(xen_ia64_unmap_resource);
  
--///////////////////////////////////////////////////////////////////////////
--// opt feature
++/**************************************************************************
++ * opt feature
++ */
  void
  xen_ia64_enable_opt_feature(void)
  {
        HYPERVISOR_opt_feature(&optf);
  }
  
--///////////////////////////////////////////////////////////////////////////
--// suspend/resume
++/**************************************************************************
++ * suspend/resume
++ */
  void
  xen_post_suspend(int suspend_cancelled)
  {
index 387a1c33688d2edbe5c3d7b84b5d8f023ecdba1b,387a1c33688d2edbe5c3d7b84b5d8f023ecdba1b..b5b1afaf70b22e26e860247b8b6c5c3c18d60776
@@@ -35,25 -35,25 +35,23 @@@ struct vm_struct *alloc_vm_area(unsigne
        int order;
        unsigned long virt;
        unsigned long nr_pages;
--      struct vm_struct* area;
--      
++      struct vm_struct *area;
++
        order = get_order(size);
        virt = __get_free_pages(GFP_KERNEL, order);
--      if (virt == 0) {
++      if (virt == 0)
                goto err0;
--      }
        nr_pages = 1 << order;
        scrub_pages(virt, nr_pages);
--      
++
        area = kmalloc(sizeof(*area), GFP_KERNEL);
--      if (area == NULL) {
++      if (area == NULL)
                goto err1;
--      }
--      
--        area->flags = VM_IOREMAP;//XXX
++
++        area->flags = VM_IOREMAP; /* XXX */
          area->addr = (void*)virt;
          area->size = size;
--        area->pages = NULL; //XXX
++        area->pages = NULL; /* XXX */
          area->nr_pages = nr_pages;
          area->phys_addr = 0;  /* xenbus_map_ring_valloc uses this field!  */
  
@@@ -63,7 -63,7 +61,6 @@@ err1
        free_pages(virt, order);
  err0:
        return NULL;
--      
  }
  EXPORT_SYMBOL_GPL(alloc_vm_area);
  
@@@ -73,8 -73,8 +70,8 @@@ void free_vm_area(struct vm_struct *are
        unsigned long i;
        unsigned long phys_addr = __pa(area->addr);
  
--      // This area is used for foreign page mappping.
--      // So underlying machine page may not be assigned.
++      /* This area is used for foreign page mappping.
++       * So underlying machine page may not be assigned. */
        for (i = 0; i < (1 << order); i++) {
                unsigned long ret;
                unsigned long gpfn = (phys_addr >> PAGE_SHIFT) + i;
index f853b01e598566282d9e88ef34e97c786fd929a1,f853b01e598566282d9e88ef34e97c786fd929a1..8d167e8277ec05fe9c27df3b4721163751fa0e18
@@@ -120,8 -120,8 +120,8 @@@ xencomm_privcmd_sysctl(privcmd_hypercal
                        .interface_version = XEN_SYSCTL_INTERFACE_VERSION,
                        .u.perfc_op = {
                                .cmd = XEN_SYSCTL_PERFCOP_query,
--                              // .desc.p = NULL,
--                              // .val.p = NULL,
++                              /* .desc.p = NULL, */
++                              /* .val.p = NULL, */
                        },
                };
  
index cbc31b39d266144d96ee360ec7d122e4b5c60cc2,cbc31b39d266144d96ee360ec7d122e4b5c60cc2..5d2f3957b9e5e0824eceb05babf722b58ce47a7e
@@@ -117,7 -117,7 +117,7 @@@ HYPERVISOR_poll
  }
  
  #ifndef CONFIG_VMX_GUEST
--// for drivers/xen/privcmd/privcmd.c
++/* for drivers/xen/privcmd/privcmd.c */
  #define machine_to_phys_mapping 0
  struct vm_area_struct;
  int direct_remap_pfn_range(struct vm_area_struct *vma,
@@@ -131,7 -131,7 +131,7 @@@ int privcmd_enforce_singleshot_mapping(
  int privcmd_mmap(struct file * file, struct vm_area_struct * vma);
  #define HAVE_ARCH_PRIVCMD_MMAP
  
--// for drivers/xen/balloon/balloon.c
++/* for drivers/xen/balloon/balloon.c */
  #ifdef CONFIG_XEN_SCRUB_PAGES
  #define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
  #else
@@@ -178,8 -178,8 +178,8 @@@ void xen_ia64_enable_opt_feature(void)
  #define __pte_ma(_x)  ((pte_t) {(_x)})        /* unmodified use */
  #define pfn_pte_ma(_x,_y)     __pte_ma(0)     /* unmodified use */
  
--// for netfront.c, netback.c
--#define MULTI_UVMFLAGS_INDEX 0 //XXX any value
++/* for netfront.c, netback.c */
++#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */
  
  static inline void
  MULTI_update_va_mapping(
@@@ -216,7 -216,7 +216,7 @@@ MULTI_grant_table_op(multicall_entry_t 
                (-ENOSYS);                                              \
        })
  
--// for debug
++/* for debug */
  asmlinkage int xprintk(const char *fmt, ...);
  #define xprintd(fmt, ...)     xprintk("%s:%d " fmt, __func__, __LINE__, \
                                        ##__VA_ARGS__)
index 81bee92856ea71724e14de4c0e2cee740bfe8867,81bee92856ea71724e14de4c0e2cee740bfe8867..9259032f88e79faf5fcb25d5feddb7d1d88de63d
@@@ -31,8 -31,8 +31,8 @@@ pfn_to_mfn_for_dma(unsigned long pfn
        if (p2m_initialized)
                return p2m_phystomach(pfn);
        mfn = HYPERVISOR_phystomach(pfn);
--      BUG_ON(mfn == 0); // XXX
--      BUG_ON(mfn == INVALID_P2M_ENTRY); // XXX
++      BUG_ON(mfn == 0); /* XXX */
++      BUG_ON(mfn == INVALID_P2M_ENTRY); /* XXX */
        BUG_ON(mfn == INVALID_MFN);
        return mfn;
  }
@@@ -52,7 -52,7 +52,7 @@@ mfn_to_pfn_for_dma(unsigned long mfn
        unsigned long pfn;
        pfn = HYPERVISOR_machtophys(mfn);
        BUG_ON(pfn == 0);
--      //BUG_ON(pfn == INVALID_M2P_ENTRY);
++      /* BUG_ON(pfn == INVALID_M2P_ENTRY); */
        return pfn;
  }
  
@@@ -98,11 -98,11 +98,11 @@@ mfn_to_local_pfn(unsigned long mfn
  
  #define mfn_to_virt(mfn) (__va((mfn) << PAGE_SHIFT))
  #define virt_to_mfn(virt) (__pa(virt) >> PAGE_SHIFT)
--#define virt_to_machine(virt) __pa(virt) // for tpmfront.c
++#define virt_to_machine(virt) __pa(virt) /* for tpmfront.c */
  
  #define set_phys_to_machine(pfn, mfn) do { } while (0)
  
--typedef unsigned long maddr_t;        // to compile netback, netfront
++typedef unsigned long maddr_t;        /* to compile netback, netfront */
  #ifndef _ASM_IA64_SN_TYPES_H /* paddr_t is defined in asm-ia64/sn/types.h */
  typedef unsigned long paddr_t;
  #endif
index b45c6ca602303d8945c35add112c9d59728aa550,b45c6ca602303d8945c35add112c9d59728aa550..09532c40b07735a380f6a0b09e6153b77e228340
@@@ -36,13 -36,13 +36,13 @@@ struct xenoprof_arch_shared_buffer 
  };
  
  struct xenoprof_shared_buffer;
--void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf);
++void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer *sbuf);
  struct xenoprof_get_buffer;
--int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer,
--                                    struct xenoprof_shared_buffer* sbuf);
++int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer *get_buffer,
++                                    struct xenoprof_shared_buffer *sbuf);
  struct xenoprof_passive;
--int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain,
--                              struct xenoprof_shared_buffer* sbuf);
++int xenoprof_arch_set_passive(struct xenoprof_passive *pdomain,
++                              struct xenoprof_shared_buffer *sbuf);
  
  #endif /* CONFIG_XEN */
  #endif /* __ASM_XENOPROF_H__ */