static unsigned short mmap_lock = 0;
static unsigned short mmap_inuse = 0;
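+/* dev_inuse bit: set while the user-space VMA for this tap device is mapped */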
+#define BLKTAP_VMA_MAPPED 5
+static inline int blktap_vma_mapped(struct tap_blkif *info)
+{
+ return test_bit(BLKTAP_VMA_MAPPED, &info->dev_inuse);
+}
+
/******************************************************************
* GRANT HANDLES
*/
return copy;
}
-static void blktap_vm_close(struct vm_area_struct *vma)
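+/* ->unmap callback: mark the device VMA as no longer mapped. */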
+static void blktap_vm_unmap(struct vm_area_struct *vma)
{
struct tap_blkif *info = vma->vm_private_data;
down_write(&info->vm_update_sem);
+ clear_bit(BLKTAP_VMA_MAPPED, &info->dev_inuse);
+ up_write(&info->vm_update_sem);
+}
- zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
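+/* ->close callback: final teardown, releasing the foreign page map. */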
+static void blktap_vm_close(struct vm_area_struct *vma)
+{
+ struct tap_blkif *info = vma->vm_private_data;
+
+ down_write(&info->vm_update_sem);
kfree(info->foreign_map.map);
static
struct vm_operations_struct blktap_vm_ops = {
+ unmap: blktap_vm_unmap,
close: blktap_vm_close,
fault: blktap_fault,
zap_pte: blktap_clear_pte,
info->pid = current->pid;
DPRINTK("blktap: mapping pid is %d\n", info->pid);
+ set_bit(BLKTAP_VMA_MAPPED, &info->dev_inuse);
info->vma = vma;
return 0;
break;
}
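+ /* The ring VMA was unmapped but not yet closed; skip request processing. */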
+ if (!blktap_vma_mapped(info)) {
+ WPRINTK("vma unmapped but not closed\n");
+ up_read(&info->vm_update_sem);
+ continue;
+ }
+
if (do_block_io_op(info))
blkif->waiting_reqs = 1;
else
pte_t (*zap_pte)(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep, int is_fullmm);
#endif
+
+ /* called before close() to indicate that no more pages should be mapped into the area */
+ void (*unmap)(struct vm_area_struct *area);
+
#ifdef CONFIG_NUMA
/*
* set_policy() op must add a reference to any non-NULL @new mempolicy
tlb_finish_mmu(tlb, start, end);
}
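+/* Invoke the vma's ->unmap hook, if the owner provides one. */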
+static inline void unmap_vma(struct vm_area_struct *vma)
+{
+ if (unlikely(vma->vm_ops && vma->vm_ops->unmap))
+ vma->vm_ops->unmap(vma);
+}
+
/*
* Create a list of vma's touched by the unmap, removing them from the mm's
* vma list as we go..
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
do {
rb_erase(&vma->vm_rb, &mm->mm_rb);
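+ /* Run the ->unmap hook before the vma is torn down. */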
+ unmap_vma(vma);
mm->map_count--;
tail_vma = vma;
vma = vma->vm_next;
void exit_mmap(struct mm_struct *mm)
{
struct mmu_gather *tlb;
- struct vm_area_struct *vma = mm->mmap;
+ struct vm_area_struct *vma_tmp, *vma = mm->mmap;
unsigned long nr_accounted = 0;
unsigned long end;
arch_exit_mmap(mm);
mmu_notifier_release(mm);
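+ /* Give every vma a chance to run its ->unmap hook before teardown. */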
+ for (vma_tmp = mm->mmap; vma_tmp; vma_tmp = vma_tmp->vm_next)
+ unmap_vma(vma_tmp);
+
lru_add_drain();
flush_cache_mm(mm);
tlb = tlb_gather_mmu(mm, 1);