*
*/
--//#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <xen/xencons.h>
#include <xen/balloon.h>
--shared_info_t *HYPERVISOR_shared_info __read_mostly = (shared_info_t *)XSI_BASE;
++shared_info_t *HYPERVISOR_shared_info __read_mostly =
++ (shared_info_t *)XSI_BASE;
EXPORT_SYMBOL(HYPERVISOR_shared_info);
start_info_t *xen_start_info;
if (ia64_platform_is("xen"))
dig_setup(cmdline_p);
--
++
if (!is_running_on_xen() || !is_initial_xendomain())
return;
xen_smp_intr_init();
}
--//XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
--// move those to lib/contiguous_bitmap?
--//XXX discontigmem/sparsemem
++/*
++ * XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
++ * move those to lib/contiguous_bitmap?
++ * XXX discontigmem/sparsemem
++ */
/*
* Bitmap is indexed by page number. If bit is set, the page is part of a
pte_t *pte;
bitmap_start = (unsigned long)contiguous_bitmap +
-- ((__pa(start) >> PAGE_SHIFT) >> 3);
++ ((__pa(start) >> PAGE_SHIFT) >> 3);
bitmap_end = (unsigned long)contiguous_bitmap +
-- (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
++ (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
start_page = bitmap_start & PAGE_MASK;
end_page = PAGE_ALIGN(bitmap_end);
node = paddr_to_nid(__pa(start));
bitmap = alloc_bootmem_pages_node(NODE_DATA(node),
-- end_page - start_page);
++ end_page - start_page);
BUG_ON(!bitmap);
memset(bitmap, 0, end_page - start_page);
pgd = pgd_offset_k(address);
if (pgd_none(*pgd))
pgd_populate(&init_mm, pgd,
-- alloc_bootmem_pages_node(NODE_DATA(node),
-- PAGE_SIZE));
++ alloc_bootmem_pages_node(NODE_DATA(node),
++ PAGE_SIZE));
pud = pud_offset(pgd, address);
if (pud_none(*pud))
pud_populate(&init_mm, pud,
-- alloc_bootmem_pages_node(NODE_DATA(node),
-- PAGE_SIZE));
++ alloc_bootmem_pages_node(NODE_DATA(node),
++ PAGE_SIZE));
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
pmd_populate_kernel(&init_mm, pmd,
-- alloc_bootmem_pages_node
-- (NODE_DATA(node), PAGE_SIZE));
++ alloc_bootmem_pages_node
++ (NODE_DATA(node), PAGE_SIZE));
pte = pte_offset_kernel(pmd, address);
if (pte_none(*pte))
set_pte(pte,
-- pfn_pte(__pa(bitmap + (address - start_page))
-- >> PAGE_SHIFT, PAGE_KERNEL));
++ pfn_pte(__pa(bitmap + (address - start_page))
++ >> PAGE_SHIFT, PAGE_KERNEL));
}
return 0;
}
}
}
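Aside: the XXX comment above refers to contiguous_bitmap_set()/contiguous_bitmap_clear(), whose bodies are not in this hunk. A minimal sketch of what they do, assuming a plain per-bit loop rather than the word-at-a-time variant the i386/x86_64 trees use; illustrative only:

  /* Mark/unmark the pages [first_page, first_page + nr_pages) in
   * contiguous_bitmap, which is indexed by page frame number. */
  static void contiguous_bitmap_set(unsigned long first_page,
  				    unsigned long nr_pages)
  {
  	unsigned long pfn;

  	for (pfn = first_page; pfn < first_page + nr_pages; pfn++)
  		__set_bit(pfn, contiguous_bitmap);
  }

  static void contiguous_bitmap_clear(unsigned long first_page,
  				      unsigned long nr_pages)
  {
  	unsigned long pfn;

  	for (pfn = first_page; pfn < first_page + nr_pages; pfn++)
  		__clear_bit(pfn, contiguous_bitmap);
  }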
--// __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
--// are based on i386 xen_create_contiguous_region(),
--// xen_destroy_contiguous_region()
++/*
++ * __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
++ * are based on i386 xen_create_contiguous_region(),
++ * xen_destroy_contiguous_region()
++ */
/* Protected by balloon_lock. */
#define MAX_CONTIG_ORDER 7
balloon_lock(flags);
/* Get a new contiguous memory extent. */
-- for (i = 0; i < num_gpfn; i++) {
++ for (i = 0; i < num_gpfn; i++)
in_frames[i] = start_gpfn + i;
-- }
out_frame = start_gpfn;
error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
success = (exchange.nr_exchanged == num_gpfn);
.domid = DOMID_SELF
},
.nr_exchanged = 0
-- };
++ };
if (!test_bit(start_gpfn, contiguous_bitmap))
contiguous_bitmap_clear(start_gpfn, num_gpfn);
-- /* Do the exchange for non-contiguous MFNs. */
++ /* Do the exchange for non-contiguous MFNs. */
in_frame = start_gpfn;
-- for (i = 0; i < num_gpfn; i++) {
++ for (i = 0; i < num_gpfn; i++)
out_frames[i] = start_gpfn + i;
-- }
error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
success = (exchange.nr_exchanged == 1);
BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
BUG_ON(success && (error != 0));
if (unlikely(error == -ENOSYS)) {
-- /* Compatibility when XENMEM_exchange is unsupported. */
++ /* Compatibility when XENMEM_exchange is unsupported. */
error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
&exchange.in);
BUG_ON(error != 1);
order, address_bits);
}
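For context, the partial initializer visible above (.domid = DOMID_SELF, .nr_exchanged = 0) is the tail of a struct xen_memory_exchange descriptor. A hedged sketch of how the create-contiguous path typically fills one in, using the field names of the XENMEM_exchange interface of this era and the variables seen above (num_gpfn, in_frames, out_frame, order, address_bits); treat it as an illustration, not the exact lines elided from this hunk:

  struct xen_memory_exchange exchange = {
  	.in = {
  		.nr_extents   = num_gpfn,	/* order-0 frames handed back */
  		.extent_order = 0,
  		.domid        = DOMID_SELF
  	},
  	.out = {
  		.nr_extents   = 1,		/* one contiguous extent */
  		.extent_order = order,
  		.address_bits = address_bits,
  		.domid        = DOMID_SELF
  	},
  	.nr_exchanged = 0
  };

  set_xen_guest_handle(exchange.in.extent_start, in_frames);
  set_xen_guest_handle(exchange.out.extent_start, &out_frame);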
--
--///////////////////////////////////////////////////////////////////////////
--// grant table hack
--// cmd: GNTTABOP_xxx
--
++/****************************************************************************
++ * grant table hack
++ * cmd: GNTTABOP_xxx
++ */
#include <linux/mm.h>
#include <xen/interface/xen.h>
#include <xen/gnttab.h>
if (flags & GNTMAP_host_map) {
if (flags & GNTMAP_application_map) {
-- xprintd("GNTMAP_application_map is not supported yet: flags 0x%x\n", flags);
++ xprintd("GNTMAP_application_map is not supported yet:"
++ " flags 0x%x\n", flags);
BUG();
}
if (flags & GNTMAP_contains_pte) {
-- xprintd("GNTMAP_contains_pte is not supported yet flags 0x%x\n", flags);
++ xprintd("GNTMAP_contains_pte is not supported yet"
++ " flags 0x%x\n", flags);
BUG();
}
} else if (flags & GNTMAP_device_map) {
-- xprintd("GNTMAP_device_map is not supported yet 0x%x\n", flags);
-- BUG();//XXX not yet. actually this flag is not used.
++ xprintd("GNTMAP_device_map is not supported yet 0x%x\n",
++ flags);
++ BUG(); /* XXX not yet. actually this flag is not used. */
} else {
BUG();
}
}
EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
--///////////////////////////////////////////////////////////////////////////
--// foreign mapping
++/**************************************************************************
++ * foreign mapping
++ */
#include <linux/efi.h>
--#include <asm/meminit.h> // for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}()
++#include <asm/meminit.h> /* for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}() */
static unsigned long privcmd_resource_min = 0;
--// Xen/ia64 currently can handle pseudo physical address bits up to
--// (PAGE_SHIFT * 3)
--static unsigned long privcmd_resource_max = GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
++/* Xen/ia64 currently can handle pseudo physical address bits up to
++ * (PAGE_SHIFT * 3) */
++static unsigned long privcmd_resource_max =
++ GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
static unsigned long privcmd_resource_align = IA64_GRANULE_SIZE;
static unsigned long
efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
efi_desc_size = ia64_boot_param->efi_memdesc_size;
-- // at first check the used highest address
++ /* First check the highest used address. */
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-- // nothing
++ /* nothing */;
}
md = p - efi_desc_size;
privcmd_resource_min = GRANULEROUNDUP(md_end_addr(md));
if (xen_ia64_privcmd_check_size(privcmd_resource_min,
-- privcmd_resource_max)) {
++ privcmd_resource_max))
goto out;
-- }
-- // the used highest address is too large. try to find the largest gap.
++ /* The highest used address is too large.
++ * Try to find the largest gap. */
tmp_min = privcmd_resource_max;
tmp_max = 0;
gap_size = 0;
md = p;
end = md_end_addr(md);
-- if (end > privcmd_resource_max) {
++ if (end > privcmd_resource_max)
break;
-- }
if (end < prev_end) {
-- // work around.
-- // Xen may pass incompletely sorted memory
-- // descriptors like
-- // [x, x + length]
-- // [x, x]
-- // this order should be reversed.
++ /* Workaround:
++ * Xen may pass incompletely sorted memory
++ * descriptors like
++ * [x, x + length]
++ * [x, x]
++ * This order should be reversed. */
continue;
}
next = p + efi_desc_size;
next_start = next->phys_addr;
-- if (next_start > privcmd_resource_max) {
++ if (next_start > privcmd_resource_max)
next_start = privcmd_resource_max;
-- }
if (end < next_start && gap_size < (next_start - end)) {
tmp_min = end;
tmp_max = next_start;
privcmd_resource_max = tmp_max;
if (!xen_ia64_privcmd_check_size(privcmd_resource_min,
privcmd_resource_max)) {
-- // Any large enough gap isn't found.
-- // go ahead anyway with the warning hoping that large region
-- // won't be requested.
-- printk(KERN_WARNING "xen privcmd: large enough region for privcmd mmap is not found.\n");
++ /* No large enough gap was found.
++ * Go ahead anyway with a warning, hoping that a large
++ * region won't be requested. */
++ printk(KERN_WARNING "xen privcmd: "
++ "large enough region for privcmd mmap is not found.\n");
}
out:
-- printk(KERN_INFO "xen privcmd uses pseudo physical addr range [0x%lx, 0x%lx] (%ldMB)\n",
++ printk(KERN_INFO "xen privcmd uses pseudo physical addr range "
++ "[0x%lx, 0x%lx] (%ldMB)\n",
privcmd_resource_min, privcmd_resource_max,
(privcmd_resource_max - privcmd_resource_min) >> 20);
BUG_ON(privcmd_resource_min >= privcmd_resource_max);
-- // XXX this should be somewhere appropriate
++ /* XXX this should be somewhere appropriate */
(void)p2m_expose_init();
return 0;
struct xen_ia64_privcmd_range {
atomic_t ref_count;
-- unsigned long pgoff; // in PAGE_SIZE
-- struct resource* res;
++ unsigned long pgoff; /* in PAGE_SIZE */
++ struct resource *res;
-- // for foreign domain p2m mapping
-- void* private;
-- void (*callback)(struct xen_ia64_privcmd_range* range, void* arg);
++ /* for foreign domain p2m mapping */
++ void *private;
++ void (*callback)(struct xen_ia64_privcmd_range *range, void *arg);
unsigned long num_entries;
struct xen_ia64_privcmd_entry entries[0];
struct xen_ia64_privcmd_vma {
int is_privcmd_mmapped;
-- struct xen_ia64_privcmd_range* range;
++ struct xen_ia64_privcmd_range *range;
unsigned long num_entries;
-- struct xen_ia64_privcmd_entry* entries;
++ struct xen_ia64_privcmd_entry *entries;
};
static void
--xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry* entry)
++xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry *entry)
{
atomic_set(&entry->map_count, 0);
entry->gpfn = INVALID_GPFN;
}
static int
--xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
++xen_ia64_privcmd_entry_mmap(struct vm_area_struct *vma,
unsigned long addr,
-- struct xen_ia64_privcmd_range* privcmd_range,
++ struct xen_ia64_privcmd_range *privcmd_range,
int i,
unsigned long gmfn,
pgprot_t prot,
domid_t domid)
{
int error = 0;
-- struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
++ struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
unsigned long gpfn;
unsigned long flags;
gpfn = (privcmd_range->res->start >> PAGE_SHIFT) + i;
flags = ASSIGN_writable;
-- if (pgprot_val(prot) == PROT_READ) {
++ if (pgprot_val(prot) == PROT_READ)
flags = ASSIGN_readonly;
-- }
error = HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn, flags, domid);
-- if (error != 0) {
++ if (error != 0)
goto out;
-- }
prot = vma->vm_page_prot;
error = remap_pfn_range(vma, addr, gpfn, 1 << PAGE_SHIFT, prot);
if (error != 0) {
error = HYPERVISOR_zap_physmap(gpfn, 0);
-- if (error) {
-- BUG();//XXX
-- }
++ if (error)
++ BUG(); /* XXX */
} else {
atomic_inc(&entry->map_count);
entry->gpfn = gpfn;
}
static void
--xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range* privcmd_range,
++xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range *privcmd_range,
int i)
{
-- struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
++ struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
unsigned long gpfn = entry->gpfn;
-- //gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
-- // (vma->vm_pgoff - privcmd_range->pgoff);
++ /* gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
++ * (vma->vm_pgoff - privcmd_range->pgoff); */
int error;
error = HYPERVISOR_zap_physmap(gpfn, 0);
-- if (error) {
-- BUG();//XXX
-- }
++ if (error)
++ BUG(); /* XXX */
entry->gpfn = INVALID_GPFN;
}
static void
--xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range* privcmd_range,
++xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range *privcmd_range,
int i)
{
-- struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
-- if (entry->gpfn != INVALID_GPFN) {
++ struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
++ if (entry->gpfn != INVALID_GPFN)
atomic_inc(&entry->map_count);
-- } else {
++ else
BUG_ON(atomic_read(&entry->map_count) != 0);
-- }
}
static void
--xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range* privcmd_range,
++xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range *privcmd_range,
int i)
{
-- struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
++ struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
if (entry->gpfn != INVALID_GPFN &&
-- atomic_dec_and_test(&entry->map_count)) {
++ atomic_dec_and_test(&entry->map_count))
xen_ia64_privcmd_entry_munmap(privcmd_range, i);
-- }
}
--static void xen_ia64_privcmd_vma_open(struct vm_area_struct* vma);
--static void xen_ia64_privcmd_vma_close(struct vm_area_struct* vma);
++static void xen_ia64_privcmd_vma_open(struct vm_area_struct *vma);
++static void xen_ia64_privcmd_vma_close(struct vm_area_struct *vma);
struct vm_operations_struct xen_ia64_privcmd_vm_ops = {
.open = &xen_ia64_privcmd_vma_open,
};
static void
--__xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
-- struct xen_ia64_privcmd_vma* privcmd_vma,
-- struct xen_ia64_privcmd_range* privcmd_range)
++__xen_ia64_privcmd_vma_open(struct vm_area_struct *vma,
++ struct xen_ia64_privcmd_vma *privcmd_vma,
++ struct xen_ia64_privcmd_range *privcmd_range)
{
unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
-- unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ unsigned long num_entries =
++ (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
unsigned long i;
BUG_ON(entry_offset < 0);
privcmd_vma->num_entries = num_entries;
privcmd_vma->entries = &privcmd_range->entries[entry_offset];
vma->vm_private_data = privcmd_vma;
-- for (i = 0; i < privcmd_vma->num_entries; i++) {
++ for (i = 0; i < privcmd_vma->num_entries; i++)
xen_ia64_privcmd_entry_open(privcmd_range, entry_offset + i);
-- }
vma->vm_private_data = privcmd_vma;
vma->vm_ops = &xen_ia64_privcmd_vm_ops;
}
static void
--xen_ia64_privcmd_vma_open(struct vm_area_struct* vma)
++xen_ia64_privcmd_vma_open(struct vm_area_struct *vma)
{
-- struct xen_ia64_privcmd_vma* old_privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
-- struct xen_ia64_privcmd_vma* privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
-- struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
++ struct xen_ia64_privcmd_vma *old_privcmd_vma =
++ (struct xen_ia64_privcmd_vma *)vma->vm_private_data;
++ struct xen_ia64_privcmd_vma *privcmd_vma =
++ (struct xen_ia64_privcmd_vma *)vma->vm_private_data;
++ struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
atomic_inc(&privcmd_range->ref_count);
-- // vm_op->open() can't fail.
++ /* vm_op->open() can't fail. */
privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);
-- // copy original value if necessary
++ /* copy original value if necessary */
privcmd_vma->is_privcmd_mmapped = old_privcmd_vma->is_privcmd_mmapped;
__xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
}
static void
--xen_ia64_privcmd_vma_close(struct vm_area_struct* vma)
++xen_ia64_privcmd_vma_close(struct vm_area_struct *vma)
{
-- struct xen_ia64_privcmd_vma* privcmd_vma =
++ struct xen_ia64_privcmd_vma *privcmd_vma =
(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
-- struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
++ struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
unsigned long i;
if (atomic_dec_and_test(&privcmd_range->ref_count)) {
#if 1
for (i = 0; i < privcmd_range->num_entries; i++) {
-- struct xen_ia64_privcmd_entry* entry =
++ struct xen_ia64_privcmd_entry *entry =
&privcmd_range->entries[i];
BUG_ON(atomic_read(&entry->map_count) != 0);
BUG_ON(entry->gpfn != INVALID_GPFN);
int
privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
-- struct xen_ia64_privcmd_vma* privcmd_vma =
++ struct xen_ia64_privcmd_vma *privcmd_vma =
(struct xen_ia64_privcmd_vma *)vma->vm_private_data;
return (xchg(&privcmd_vma->is_privcmd_mmapped, 1) == 0);
}
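privcmd_enforce_singleshot_mapping() uses xchg() so that only the first mapper of a given privcmd vma succeeds. A one-line usage sketch of how a privcmd ioctl path would be expected to consume it (the surrounding ioctl code is an assumption, not part of this file):

  if (!privcmd_enforce_singleshot_mapping(vma))
  	return -EINVAL;	/* this vma has already been mapped once */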
int error;
unsigned long size = vma->vm_end - vma->vm_start;
unsigned long num_entries = size >> PAGE_SHIFT;
-- struct xen_ia64_privcmd_range* privcmd_range = NULL;
-- struct xen_ia64_privcmd_vma* privcmd_vma = NULL;
-- struct resource* res = NULL;
++ struct xen_ia64_privcmd_range *privcmd_range = NULL;
++ struct xen_ia64_privcmd_vma *privcmd_vma = NULL;
++ struct resource *res = NULL;
unsigned long i;
BUG_ON(!is_running_on_xen());
privcmd_range =
vmalloc(sizeof(*privcmd_range) +
sizeof(privcmd_range->entries[0]) * num_entries);
-- if (privcmd_range == NULL) {
++ if (privcmd_range == NULL)
goto out_enomem0;
-- }
privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
-- if (privcmd_vma == NULL) {
++ if (privcmd_vma == NULL)
goto out_enomem1;
-- }
privcmd_vma->is_privcmd_mmapped = 0;
res = kzalloc(sizeof(*res), GFP_KERNEL);
-- if (res == NULL) {
++ if (res == NULL)
goto out_enomem1;
-- }
res->name = "Xen privcmd mmap";
error = allocate_resource(&iomem_resource, res, size,
privcmd_resource_min, privcmd_resource_max,
privcmd_resource_align, NULL, NULL);
-- if (error) {
++ if (error)
goto out_enomem1;
-- }
privcmd_range->res = res;
/* DONTCOPY is essential for Xen as copy_page_range is broken. */
privcmd_range->num_entries = num_entries;
privcmd_range->private = NULL;
privcmd_range->callback = NULL;
-- for (i = 0; i < privcmd_range->num_entries; i++) {
++ for (i = 0; i < privcmd_range->num_entries; i++)
xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
-- }
__xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
return 0;
int
direct_remap_pfn_range(struct vm_area_struct *vma,
-- unsigned long address, // process virtual address
-- unsigned long gmfn, // gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE
++ unsigned long address, /* process virtual address */
++ unsigned long gmfn, /* gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE */
unsigned long size,
pgprot_t prot,
-- domid_t domid) // target domain
++ domid_t domid) /* target domain */
{
-- struct xen_ia64_privcmd_vma* privcmd_vma =
++ struct xen_ia64_privcmd_vma *privcmd_vma =
(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
-- struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
++ struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
unsigned long i;
BUG_ON(!is_running_on_xen());
#if 0
-- if (prot != vm->vm_page_prot) {
++ if (prot != vm->vm_page_prot)
return -EINVAL;
-- }
#endif
i = (address - vma->vm_start) >> PAGE_SHIFT;
for (offset = 0; offset < size; offset += PAGE_SIZE) {
-- error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, privcmd_range, entry_offset + i, gmfn, prot, domid);
++ error = xen_ia64_privcmd_entry_mmap(vma,
++ (address + offset) & PAGE_MASK,
++ privcmd_range, entry_offset + i, gmfn, prot, domid);
-- if (error != 0) {
++ if (error != 0)
break;
-- }
i++;
gmfn++;
-- }
++ }
return error;
}
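direct_remap_pfn_range() is the entry point the privcmd mmap path ends up in for foreign mappings. A hedged usage sketch; only the signature is taken from the code above, the wrapper itself is hypothetical:

  /* Map nr_pages foreign frames of domain 'domid', starting at 'gmfn',
   * into an already privcmd-mmapped vma at 'user_addr'. */
  static int map_foreign_range(struct vm_area_struct *vma,
  			       unsigned long user_addr, unsigned long gmfn,
  			       unsigned long nr_pages, domid_t domid)
  {
  	return direct_remap_pfn_range(vma, user_addr, gmfn,
  				      nr_pages << PAGE_SHIFT,
  				      vma->vm_page_prot, domid);
  }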
--///////////////////////////////////////////////////////////////////////////
--// expose p2m table
++/**************************************************************************
++ * expose p2m table
++ */
#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
#include <linux/cpu.h>
#include <asm/uaccess.h>
};
static unsigned long p2m_assign_start_pfn __read_mostly;
static unsigned long p2m_assign_end_pfn __read_mostly;
--static unsigned long p2m_expose_size; // this is referenced only when resume.
-- // so __read_mostly doesn't make sense.
--volatile const pte_t* p2m_pte __read_mostly;
++static unsigned long p2m_expose_size; /* this is referenced only on resume,
++ * so __read_mostly doesn't make sense.
++ */
++volatile const pte_t *p2m_pte __read_mostly;
#define GRANULE_PFN PTRS_PER_PTE
static unsigned long p2m_granule_pfn __read_mostly = GRANULE_PFN;
static int xen_ia64_p2m_expose __read_mostly = 1;
module_param(xen_ia64_p2m_expose, int, 0);
MODULE_PARM_DESC(xen_ia64_p2m_expose,
-- "enable/disable xen/ia64 p2m exposure optimization\n");
++ "enable/disable xen/ia64 p2m exposure optimization\n");
#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
static int xen_ia64_p2m_expose_use_dtr __read_mostly = 1;
module_param(xen_ia64_p2m_expose_use_dtr, int, 0);
MODULE_PARM_DESC(xen_ia64_p2m_expose_use_dtr,
-- "use/unuse dtr to map exposed p2m table\n");
++ "use/unuse dtr to map exposed p2m table\n");
static const int p2m_page_shifts[] = {
_PAGE_SIZE_4K,
};
static struct p2m_itr_arg p2m_itr_arg __read_mostly;
--// This should be in asm-ia64/kregs.h
++/* This should be in asm-ia64/kregs.h */
#define IA64_TR_P2M_TABLE 3
static void
--p2m_itr(void* info)
++p2m_itr(void *info)
{
-- struct p2m_itr_arg* arg = (struct p2m_itr_arg*)info;
++ struct p2m_itr_arg *arg = (struct p2m_itr_arg *)info;
ia64_itr(0x2, IA64_TR_P2M_TABLE,
-- arg->vaddr, arg->pteval, arg->log_page_size);
++ arg->vaddr, arg->pteval, arg->log_page_size);
ia64_srlz_d();
}
static int
p2m_expose_dtr_call(struct notifier_block *self,
-- unsigned long event, void* ptr)
++ unsigned long event, void *ptr)
{
unsigned int cpu = (unsigned int)(long)ptr;
if (event != CPU_ONLINE)
continue;
granule_pfn = max(page_size >> PAGE_SHIFT,
-- p2m_granule_pfn);
++ p2m_granule_pfn);
p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
-- granule_pfn);
++ granule_pfn);
p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
-- granule_pfn);
++ granule_pfn);
num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
p2m_expose_size = num_pfn << PAGE_SHIFT;
p2m_size = p2m_table_size(num_pfn);
-- p2m_size = ROUNDUP(p2m_size, granule_pfn << PAGE_SHIFT);
++ p2m_size = ROUNDUP(p2m_size,
++ granule_pfn << PAGE_SHIFT);
if (p2m_size == page_size)
break;
}
{
BUG_ON(p2m_granule_pfn & (p2m_granule_pfn - 1));
p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
-- p2m_granule_pfn);
-- p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn, p2m_granule_pfn);
++ p2m_granule_pfn);
++ p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
++ p2m_granule_pfn);
num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
p2m_expose_size = num_pfn << PAGE_SHIFT;
p2m_size = p2m_table_size(num_pfn);
p2m_size = ROUNDUP(p2m_size, p2m_granule_pfn << PAGE_SHIFT);
align = max(privcmd_resource_align,
-- p2m_granule_pfn << PAGE_SHIFT);
++ p2m_granule_pfn << PAGE_SHIFT);
}
-- // use privcmd region
++ /* use privcmd region */
error = allocate_resource(&iomem_resource, &p2m_resource, p2m_size,
-- privcmd_resource_min, privcmd_resource_max,
-- align, NULL, NULL);
++ privcmd_resource_min, privcmd_resource_max,
++ align, NULL, NULL);
if (error) {
printk(KERN_ERR P2M_PREFIX
"can't allocate region for p2m exposure "
p2m_assign_end_pfn = p2m_resource.end >> PAGE_SHIFT;
error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
-- p2m_assign_start_pfn,
-- p2m_expose_size, p2m_granule_pfn);
++ p2m_assign_start_pfn,
++ p2m_expose_size, p2m_granule_pfn);
if (error) {
printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
error);
#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
if (xen_ia64_p2m_expose_use_dtr) {
p2m_itr_arg.vaddr = (unsigned long)__va(p2m_assign_start_pfn
-- << PAGE_SHIFT);
++ << PAGE_SHIFT);
p2m_itr_arg.pteval = pte_val(pfn_pte(p2m_assign_start_pfn,
-- PAGE_KERNEL));
++ PAGE_KERNEL));
p2m_itr_arg.log_page_size = log_page_size;
smp_mb();
smp_call_function(&p2m_itr, &p2m_itr_arg, 1, 1);
* interrupts are masked when resume.
*/
error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
-- p2m_assign_start_pfn,
-- p2m_expose_size, p2m_granule_pfn);
++ p2m_assign_start_pfn,
++ p2m_expose_size, p2m_granule_pfn);
if (error) {
printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
error);
}
}
--//XXX inlinize?
++/* XXX inlinize? */
unsigned long
p2m_phystomach(unsigned long gpfn)
{
-- volatile const pte_t* pte;
++ volatile const pte_t *pte;
unsigned long mfn;
unsigned long pteval;
mfn = INVALID_MFN;
if (likely(__get_user(pteval, (unsigned long __user *)pte) == 0 &&
-- pte_present(__pte(pteval)) &&
-- pte_pfn(__pte(pteval)) != (INVALID_MFN >> PAGE_SHIFT)))
++ pte_present(__pte(pteval)) &&
++ pte_pfn(__pte(pteval)) != (INVALID_MFN >> PAGE_SHIFT)))
mfn = (pteval & _PFN_MASK) >> PAGE_SHIFT;
return mfn;
EXPORT_SYMBOL_GPL(p2m_pte);
EXPORT_SYMBOL_GPL(p2m_phystomach);
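p2m_phystomach() is the fast path the exposed p2m table buys: a plain table lookup instead of a hypercall. A hedged sketch of the expected calling pattern; the fallback helper is hypothetical and only marks where a caller's existing slow path would go:

  unsigned long mfn = p2m_phystomach(gpfn);

  if (mfn == INVALID_MFN)
  	/* no usable entry in the exposed table */
  	mfn = slow_path_gpfn_to_mfn(gpfn);	/* hypothetical fallback */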
--///////////////////////////////////////////////////////////////////////////
--// foreign domain p2m mapping
++/**************************************************************************
++ * foreign domain p2m mapping
++ */
#include <asm/xen/xencomm.h>
#include <xen/public/privcmd.h>
};
static void
--xen_foreign_p2m_unexpose(struct xen_ia64_privcmd_range* privcmd_range,
-- void* arg)
++xen_foreign_p2m_unexpose(struct xen_ia64_privcmd_range *privcmd_range,
++ void *arg)
{
-- struct foreign_p2m_private* private = (struct foreign_p2m_private*)arg;
++ struct foreign_p2m_private *private =
++ (struct foreign_p2m_private *)arg;
int ret;
privcmd_range->private = NULL;
}
int
--xen_foreign_p2m_expose(privcmd_hypercall_t* hypercall)
++xen_foreign_p2m_expose(privcmd_hypercall_t *hypercall)
{
-- // hypercall->
-- // arg0: cmd = IA64_DOM0VP_expose_foreign_p2m
-- // arg1: va
-- // arg2: domid
-- // arg3: __user* memmap_info
-- // arg4: flags
++ /*
++ * hypercall->
++ * arg0: cmd = IA64_DOM0VP_expose_foreign_p2m
++ * arg1: va
++ * arg2: domid
++ * arg3: __user* memmap_info
++ * arg4: flags
++ */
int ret = 0;
-- struct mm_struct* mm = current->mm;
++ struct mm_struct *mm = current->mm;
unsigned long vaddr = hypercall->arg[1];
domid_t domid = hypercall->arg[2];
struct xen_ia64_memmap_info memmap_info;
size_t memmap_size;
-- struct xen_ia64_memmap_info* k_memmap_info = NULL;
++ struct xen_ia64_memmap_info *k_memmap_info = NULL;
unsigned long max_gpfn;
unsigned long p2m_size;
-- struct resource* res;
++ struct resource *res;
unsigned long gpfn;
-- struct vm_area_struct* vma;
-- void* p;
++ struct vm_area_struct *vma;
++ void *p;
unsigned long prev_src_gpfn_end;
-- struct xen_ia64_privcmd_vma* privcmd_vma;
-- struct xen_ia64_privcmd_range* privcmd_range;
-- struct foreign_p2m_private* private = NULL;
++ struct xen_ia64_privcmd_vma *privcmd_vma;
++ struct xen_ia64_privcmd_range *privcmd_range;
++ struct foreign_p2m_private *private = NULL;
BUG_ON(hypercall->arg[0] != IA64_DOM0VP_expose_foreign_p2m);
}
gpfn = res->start >> PAGE_SHIFT;
-- // arg0: dest_gpfn
-- // arg1: domid
-- // arg2: XEN_GUEST_HANDLE(char) buffer: memmap_info
-- // arg3: flags
-- // The hypercall checks its intergirty/simplfies it and
-- // copy it back for us.
++ /*
++ * arg0: dest_gpfn
++ * arg1: domid
++ * arg2: XEN_GUEST_HANDLE(char) buffer: memmap_info
++ * arg3: flags
++ * The hypercall checks its integrity/simplifies it and
++ * copies it back for us.
++ */
ret = xencomm_arch_expose_foreign_p2m(gpfn, domid,
xencomm_map_no_alloc(k_memmap_info, memmap_size),
hypercall->arg[4]);
vma->vm_page_prot);
if (ret) {
for (i = 0; i < gpfn + gpfn_offset; i++) {
-- struct xen_ia64_privcmd_entry* entry =
++ struct xen_ia64_privcmd_entry *entry =
&privcmd_range->entries[i];
BUG_ON(atomic_read(&entry->map_count) != 1 &&
atomic_read(&entry->map_count) != 0);
for (i = gpfn_offset;
i < gpfn_offset + (size >> PAGE_SHIFT);
i++) {
-- struct xen_ia64_privcmd_entry* entry =
++ struct xen_ia64_privcmd_entry *entry =
&privcmd_range->entries[i];
BUG_ON(atomic_read(&entry->map_count) != 0);
BUG_ON(entry->gpfn != INVALID_GPFN);
}
#endif
--///////////////////////////////////////////////////////////////////////////
--// for xenoprof
--
++/**************************************************************************
++ * for xenoprof
++ */
--struct resource*
++struct resource *
xen_ia64_allocate_resource(unsigned long size)
{
-- struct resource* res;
++ struct resource *res;
int error;
res = kzalloc(sizeof(*res), GFP_KERNEL);
res->name = "Xen";
res->flags = IORESOURCE_MEM;
error = allocate_resource(&iomem_resource, res, PAGE_ALIGN(size),
-- privcmd_resource_min, privcmd_resource_max,
-- IA64_GRANULE_SIZE, NULL, NULL);
++ privcmd_resource_min, privcmd_resource_max,
++ IA64_GRANULE_SIZE, NULL, NULL);
if (error) {
kfree(res);
return ERR_PTR(error);
EXPORT_SYMBOL_GPL(xen_ia64_allocate_resource);
void
--xen_ia64_release_resource(struct resource* res)
++xen_ia64_release_resource(struct resource *res)
{
release_resource(res);
kfree(res);
EXPORT_SYMBOL_GPL(xen_ia64_release_resource);
void
--xen_ia64_unmap_resource(struct resource* res)
++xen_ia64_unmap_resource(struct resource *res)
{
unsigned long gpfn = res->start >> PAGE_SHIFT;
unsigned long nr_pages = (res->end - res->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(xen_ia64_unmap_resource);
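The xenoprof helpers above form a small allocate/release pair over the privcmd pseudo-physical range. A hedged usage sketch; 'bufsize' and the calling context are assumptions, while the ERR_PTR convention and the release behaviour come from the code above:

  struct resource *res;

  res = xen_ia64_allocate_resource(bufsize);	/* granule-aligned */
  if (IS_ERR(res))
  	return PTR_ERR(res);

  /* ... hand [res->start, res->end] to Xen as buffer space ... */

  xen_ia64_release_resource(res);		/* release_resource() + kfree() */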
--///////////////////////////////////////////////////////////////////////////
--// opt feature
++/**************************************************************************
++ * opt feature
++ */
void
xen_ia64_enable_opt_feature(void)
{
HYPERVISOR_opt_feature(&optf);
}
--///////////////////////////////////////////////////////////////////////////
--// suspend/resume
++/**************************************************************************
++ * suspend/resume
++ */
void
xen_post_suspend(int suspend_cancelled)
{