ia64/xen-unstable

changeset 10012:06e5c5599147

[IA64] Allow multiple-time mmap of the privcmd device

Allow multiple-time mmap of the privcmd device. The old implementation
did not allow the same struct file to be mmapped more than once, but
xend and qemu mmap the device multiple times.
This patch affects only the dom0 VP model. With it, multiple domU
guests can boot simultaneously, and a VTI domain can boot on a
VP-model dom0.
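
The heart of the change: mapping state moves out of file->private_data
(which forced the one-mmap-per-file limit) into a reference-counted
range object shared by every VMA that maps it, freed on the last close.
A minimal sketch of that pattern, assuming simplified names (anything
ending in _sketch is illustrative, not a symbol from this patch) and
dropping the per-entry bookkeeping:

/* Sketch only: a refcounted per-mapping object replaces per-file state,
 * so one struct file can back any number of mmap()s. */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/atomic.h>

struct range_sketch {
	atomic_t ref_count;		/* one reference per VMA */
};

static void range_sketch_open(struct vm_area_struct *vma)
{
	struct range_sketch *r = vma->vm_private_data;
	atomic_inc(&r->ref_count);	/* VMA duplicated or split */
}

static void range_sketch_close(struct vm_area_struct *vma)
{
	struct range_sketch *r = vma->vm_private_data;
	if (atomic_dec_and_test(&r->ref_count))
		vfree(r);		/* last mapping gone: free the range */
}

static struct vm_operations_struct range_sketch_ops = {
	.open  = range_sketch_open,
	.close = range_sketch_close,
};

static int range_sketch_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* A fresh range per mmap() call: repeated mmaps of the same
	 * struct file no longer collide on file->private_data. */
	struct range_sketch *r = vmalloc(sizeof(*r));
	if (r == NULL)
		return -ENOMEM;
	atomic_set(&r->ref_count, 1);
	vma->vm_private_data = r;
	vma->vm_ops = &range_sketch_ops;
	return 0;
}

The actual patch additionally keeps a small per-VMA wrapper
(struct xen_ia64_privcmd_vma) so that a split VMA sees only its own
slice of the range's entries[] array.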

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Tue May 16 09:05:36 2006 -0600 (2006-05-16)
parents 608ac00f4cfc
children 77ccce98ddef
files linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c	Tue May 16 08:59:26 2006 -0600
     1.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c	Tue May 16 09:05:36 2006 -0600
     1.3 @@ -23,6 +23,7 @@
     1.4  //#include <linux/kernel.h>
     1.5  #include <linux/spinlock.h>
     1.6  #include <linux/bootmem.h>
     1.7 +#include <linux/vmalloc.h>
     1.8  #include <asm/page.h>
     1.9  #include <asm/hypervisor.h>
    1.10  #include <asm/hypercall.h>
    1.11 @@ -363,7 +364,6 @@ struct address_space xen_ia64_foreign_du
    1.12  struct xen_ia64_privcmd_entry {
    1.13  	atomic_t	map_count;
    1.14  	struct page*	page;
    1.15 -	unsigned long	mfn;
    1.16  };
    1.17  
    1.18  static void
    1.19 @@ -371,9 +371,13 @@ xen_ia64_privcmd_init_entry(struct xen_i
    1.20  {
    1.21  	atomic_set(&entry->map_count, 0);
    1.22  	entry->page = NULL;
    1.23 -	entry->mfn = INVALID_MFN;
    1.24  }
    1.25  
    1.26 +//TODO Using alloc_page() to allocate pseudo-physical address space
    1.27 +//     is a waste of memory.
    1.28 +//     When a VTI domain is created, qemu maps all of the domain's
    1.29 +//     pages, which amounts to at least several hundred megabytes.
    1.30 +//     Remove alloc_page().
    1.31  static int
    1.32  xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
    1.33  			    unsigned long addr,
    1.34 @@ -418,7 +422,6 @@ xen_ia64_privcmd_entry_mmap(struct vm_ar
    1.35  	} else {
    1.36  		atomic_inc(&entry->map_count);
    1.37  		entry->page = page;
    1.38 -		entry->mfn = mfn;
    1.39  	}
    1.40  
    1.41  out:
    1.42 @@ -443,7 +446,6 @@ xen_ia64_privcmd_entry_munmap(struct xen
    1.43  	}
    1.44  
    1.45  	entry->page = NULL;
    1.46 -	entry->mfn = INVALID_MFN;
    1.47  	__free_page(page);
    1.48  }
    1.49  
    1.50 @@ -465,9 +467,8 @@ xen_ia64_privcmd_entry_close(struct xen_
    1.51  	}
    1.52  }
    1.53  
    1.54 -struct xen_ia64_privcmd_file {
    1.55 -	struct file*			file;
    1.56 -	atomic_t			map_count;
    1.57 +struct xen_ia64_privcmd_range {
    1.58 +	atomic_t			ref_count;
    1.59  	unsigned long			pgoff; // in PAGE_SIZE
    1.60  
    1.61  	unsigned long			num_entries;
    1.62 @@ -475,7 +476,8 @@ struct xen_ia64_privcmd_file {
    1.63  };
    1.64  
    1.65  struct xen_ia64_privcmd_vma {
    1.66 -	struct xen_ia64_privcmd_file*	file;
    1.67 +	struct xen_ia64_privcmd_range*	range;
    1.68 +
    1.69  	unsigned long			num_entries;
    1.70  	struct xen_ia64_privcmd_entry*	entries;
    1.71  };
    1.72 @@ -490,20 +492,19 @@ struct vm_operations_struct xen_ia64_pri
    1.73  
    1.74  static void
    1.75  __xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
    1.76 -			    struct xen_ia64_privcmd_vma* privcmd_vma)
    1.77 +			    struct xen_ia64_privcmd_vma* privcmd_vma,
    1.78 +			    struct xen_ia64_privcmd_range* privcmd_range)
    1.79  {
    1.80 -	struct xen_ia64_privcmd_file* privcmd_file =
    1.81 -		(struct xen_ia64_privcmd_file*)vma->vm_file->private_data;
    1.82 -	unsigned long entry_offset = vma->vm_pgoff - privcmd_file->pgoff;
    1.83 +	unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
    1.84  	unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    1.85  	unsigned long i;
    1.86  
    1.87  	BUG_ON(entry_offset < 0);
    1.88 -	BUG_ON(entry_offset + num_entries > privcmd_file->num_entries);
    1.89 +	BUG_ON(entry_offset + num_entries > privcmd_range->num_entries);
    1.90  
    1.91 -	privcmd_vma->file = privcmd_file;
    1.92 +	privcmd_vma->range = privcmd_range;
    1.93  	privcmd_vma->num_entries = num_entries;
    1.94 -	privcmd_vma->entries = &privcmd_file->entries[entry_offset];
    1.95 +	privcmd_vma->entries = &privcmd_range->entries[entry_offset];
    1.96  	vma->vm_private_data = privcmd_vma;
    1.97  	for (i = 0; i < privcmd_vma->num_entries; i++) {
    1.98  		xen_ia64_privcmd_entry_open(&privcmd_vma->entries[i]);
    1.99 @@ -516,15 +517,14 @@ static void
   1.100  static void
   1.101  xen_ia64_privcmd_vma_open(struct vm_area_struct* vma)
   1.102  {
   1.103 -	struct xen_ia64_privcmd_file* privcmd_file =
   1.104 -		(struct xen_ia64_privcmd_file*)vma->vm_file->private_data;
   1.105 -	struct xen_ia64_privcmd_vma* privcmd_vma;
   1.106 +	struct xen_ia64_privcmd_vma* privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
   1.107 +	struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
   1.108  
   1.109 -	atomic_inc(&privcmd_file->map_count);
   1.110 +	atomic_inc(&privcmd_range->ref_count);
   1.111  	// vm_op->open() can't fail.
   1.112  	privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);
   1.113  
   1.114 -	__xen_ia64_privcmd_vma_open(vma, privcmd_vma);
   1.115 +	__xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
   1.116  }
   1.117  
   1.118  static void
   1.119 @@ -532,7 +532,7 @@ xen_ia64_privcmd_vma_close(struct vm_are
   1.120  {
   1.121  	struct xen_ia64_privcmd_vma* privcmd_vma =
   1.122  		(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
   1.123 -	struct xen_ia64_privcmd_file* privcmd_file = privcmd_vma->file;
   1.124 +	struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
   1.125  	unsigned long i;
   1.126  
   1.127  	for (i = 0; i < privcmd_vma->num_entries; i++) {
   1.128 @@ -541,17 +541,16 @@ xen_ia64_privcmd_vma_close(struct vm_are
   1.129  	vma->vm_private_data = NULL;
   1.130  	kfree(privcmd_vma);
   1.131  
   1.132 -	if (atomic_dec_and_test(&privcmd_file->map_count)) {
   1.133 +	if (atomic_dec_and_test(&privcmd_range->ref_count)) {
   1.134  #if 1
   1.135 -		for (i = 0; i < privcmd_file->num_entries; i++) {
   1.136 +		for (i = 0; i < privcmd_range->num_entries; i++) {
   1.137  			struct xen_ia64_privcmd_entry* entry =
   1.138 -				&privcmd_vma->entries[i];
   1.139 +				&privcmd_range->entries[i];
   1.140  			BUG_ON(atomic_read(&entry->map_count) != 0);
   1.141  			BUG_ON(entry->page != NULL);
   1.142  		}
   1.143  #endif
   1.144 -		privcmd_file->file->private_data = NULL;
   1.145 -		kfree(privcmd_file->file->private_data);
   1.146 +		vfree(privcmd_range);
   1.147  	}
   1.148  }
   1.149  
   1.150 @@ -559,22 +558,16 @@ int
   1.151  privcmd_mmap(struct file * file, struct vm_area_struct * vma)
   1.152  {
   1.153  	unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
   1.154 -	struct xen_ia64_privcmd_file* privcmd_file;
   1.155 +	struct xen_ia64_privcmd_range* privcmd_range;
   1.156  	struct xen_ia64_privcmd_vma* privcmd_vma;
   1.157  	unsigned long i;
   1.158  	BUG_ON(!running_on_xen);
   1.159  
   1.160 -        /* DONTCOPY is essential for Xen as copy_page_range is broken. */
   1.161 -        vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
   1.162 -
   1.163 -	if (file->private_data != NULL) {
   1.164 -		return -EBUSY;
   1.165 -	}
   1.166 -
   1.167 -	privcmd_file = kmalloc(sizeof(*privcmd_file) +
   1.168 -			       sizeof(privcmd_file->entries[0]) * num_entries,
   1.169 -			       GFP_KERNEL);
   1.170 -	if (privcmd_file == NULL) {
   1.171 +	BUG_ON(file->private_data != NULL);
   1.172 +	privcmd_range =
   1.173 +		vmalloc(sizeof(*privcmd_range) +
   1.174 +			sizeof(privcmd_range->entries[0]) * num_entries);
   1.175 +	if (privcmd_range == NULL) {
   1.176  		goto out_enomem0;
   1.177  	}
   1.178  	privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
   1.179 @@ -582,22 +575,23 @@ privcmd_mmap(struct file * file, struct 
   1.180  		goto out_enomem1;
   1.181  	}
   1.182  
   1.183 -	atomic_set(&privcmd_file->map_count, 1);
   1.184 -	privcmd_file->num_entries = num_entries;
   1.185 -	for (i = 0; i < privcmd_file->num_entries; i++) {
   1.186 -		xen_ia64_privcmd_init_entry(&privcmd_file->entries[i]);
   1.187 +	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
   1.188 +	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
   1.189 +
   1.190 +	atomic_set(&privcmd_range->ref_count, 1);
   1.191 +	privcmd_range->pgoff = vma->vm_pgoff;
   1.192 +	privcmd_range->num_entries = num_entries;
   1.193 +	for (i = 0; i < privcmd_range->num_entries; i++) {
   1.194 +		xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
   1.195  	}
   1.196 -	file->private_data = privcmd_file;
   1.197 -	privcmd_file->file = file;
   1.198 -	privcmd_file->pgoff = vma->vm_pgoff;
   1.199  
   1.200 -	__xen_ia64_privcmd_vma_open(vma, privcmd_vma);
   1.201 +	__xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
   1.202  	return 0;
   1.203  
   1.204  out_enomem1:
   1.205  	kfree(privcmd_vma);
   1.206  out_enomem0:
   1.207 -	kfree(privcmd_file);
   1.208 +	vfree(privcmd_range);
   1.209  	return -ENOMEM;
   1.210  }
   1.211  
   1.212 @@ -625,7 +619,7 @@ direct_remap_pfn_range(struct vm_area_st
   1.213  	i = (address - vma->vm_start) >> PAGE_SHIFT;
   1.214  	for (offset = 0; offset < size; offset += PAGE_SIZE) {
   1.215  		struct xen_ia64_privcmd_entry* entry =
   1.216 -			&privcmd_vma->file->entries[i];
   1.217 +			&privcmd_vma->entries[i];
   1.218  		error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, entry, mfn, prot, domid);
   1.219  		if (error != 0) {
   1.220  			break;
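
One consequence of the new layout, worth spelling out:
__xen_ia64_privcmd_vma_open() points privcmd_vma->entries at the VMA's
own slice of the shared range (&privcmd_range->entries[vma->vm_pgoff -
privcmd_range->pgoff]), which is why direct_remap_pfn_range() can now
index with the page offset inside the VMA alone. A small userspace
sketch of that arithmetic, with made-up numbers (the 16KB page size and
all offsets are examples, not values from the changeset):

/* Sketch of the entry-slice arithmetic the patch relies on. */
#include <stdio.h>

int main(void)
{
	unsigned long page_shift  = 14;             /* e.g. 16KB pages on ia64 */
	unsigned long range_pgoff = 100;            /* privcmd_range->pgoff */
	unsigned long vma_pgoff   = 104;            /* this VMA starts 4 pages in */
	unsigned long vm_start    = 0x2000000000UL;
	unsigned long address     = vm_start + (3UL << page_shift);

	/* __xen_ia64_privcmd_vma_open(): the VMA's entries[] slice begins
	 * at this offset inside the shared range's entries[]. */
	unsigned long entry_offset = vma_pgoff - range_pgoff;   /* 4 */

	/* direct_remap_pfn_range(): index within the VMA's slice. */
	unsigned long i = (address - vm_start) >> page_shift;   /* 3 */

	/* The equivalent index into the whole range's entries[]. */
	printf("range entry = %lu\n", entry_offset + i);        /* 7 */
	return 0;
}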