ia64/xen-unstable

changeset 1403:9bb84189678d

bitkeeper revision 1.913 (40ab4ea8vu84ako0LR_rQN0fDuznVw)

Fix the mmu_update hypercall interface: pass the update count by value and report the number of successfully processed updates through an optional 'success_count' pointer, rather than passing the count through an in/out pointer.
author kaf24@scramble.cl.cam.ac.uk
date Wed May 19 12:10:16 2004 +0000 (2004-05-19)
parents 2afa8e2ed882
children d11b603a8ccf
files extras/mini-os/h/hypervisor.h tools/xc/lib/xc_private.c xen/common/memory.c xen/include/xen/mm.h xenolinux-2.4.26-sparse/arch/xen/drivers/dom0/core.c xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/main.c xenolinux-2.4.26-sparse/arch/xen/drivers/netif/frontend/main.c xenolinux-2.4.26-sparse/arch/xen/kernel/traps.c xenolinux-2.4.26-sparse/arch/xen/mm/hypervisor.c xenolinux-2.4.26-sparse/arch/xen/mm/ioremap.c xenolinux-2.4.26-sparse/include/asm-xen/hypervisor.h
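
In summary: the guest-visible HYPERVISOR_mmu_update() wrapper now takes the update count by value plus an optional pointer through which the hypervisor reports how many updates were processed, instead of an in/out count pointer. A before/after sketch of the prototype (the exact definitions appear in the hunks below):

    /* Old interface: the count is read through the pointer and, on error,
     * a count is written back through the same pointer. */
    int HYPERVISOR_mmu_update(mmu_update_t *req, int *count);

    /* New interface: the count is passed by value; 'success_count', if
     * non-NULL, receives the number of updates that were processed. */
    int HYPERVISOR_mmu_update(mmu_update_t *req, int count, int *success_count);

On x86 the three arguments are passed in EBX, ECX and EDX respectively, as the inline-assembly wrappers for mini-os and xenolinux show.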
line diff
     1.1 --- a/extras/mini-os/h/hypervisor.h	Wed May 19 10:16:44 2004 +0000
     1.2 +++ b/extras/mini-os/h/hypervisor.h	Wed May 19 12:10:16 2004 +0000
     1.3 @@ -50,13 +50,15 @@ static __inline__ int HYPERVISOR_set_tra
     1.4      return ret;
     1.5  }
     1.6  
     1.7 -static __inline__ int HYPERVISOR_mmu_update(mmu_update_t *req, int count)
     1.8 +static __inline__ int HYPERVISOR_mmu_update(mmu_update_t *req, 
     1.9 +                                            int count, 
    1.10 +                                            int *success_count)
    1.11  {
    1.12      int ret;
    1.13      __asm__ __volatile__ (
    1.14          TRAP_INSTR
    1.15          : "=a" (ret) : "0" (__HYPERVISOR_mmu_update), 
    1.16 -        "b" (req), "c" (count) : "memory" );
    1.17 +        "b" (req), "c" (count), "d" (success_count)  : "memory" );
    1.18  
    1.19      return ret;
    1.20  }
     2.1 --- a/tools/xc/lib/xc_private.c	Wed May 19 10:16:44 2004 +0000
     2.2 +++ b/tools/xc/lib/xc_private.c	Wed May 19 12:10:16 2004 +0000
     2.3 @@ -323,7 +323,8 @@ static int flush_mmu_updates(int xc_hand
     2.4  
     2.5      hypercall.op     = __HYPERVISOR_mmu_update;
     2.6      hypercall.arg[0] = (unsigned long)mmu->updates;
     2.7 -    hypercall.arg[1] = (unsigned long)&(mmu->idx);
     2.8 +    hypercall.arg[1] = (unsigned long)mmu->idx;
     2.9 +    hypercall.arg[2] = 0;
    2.10  
    2.11      if ( mlock(mmu->updates, sizeof(mmu->updates)) != 0 )
    2.12      {
     3.1 --- a/xen/common/memory.c	Wed May 19 10:16:44 2004 +0000
     3.2 +++ b/xen/common/memory.c	Wed May 19 12:10:16 2004 +0000
     3.3 @@ -988,9 +988,8 @@ static int do_extended_command(unsigned 
     3.4  }
     3.5  
     3.6  
     3.7 -int do_mmu_update(mmu_update_t *ureqs, int * p_count)
     3.8 +int do_mmu_update(mmu_update_t *ureqs, int count, int *success_count)
     3.9  {
    3.10 -    int count;
    3.11      mmu_update_t req;
    3.12      unsigned long va = 0, deferred_ops, pfn, prev_pfn = 0;
    3.13      struct pfn_info *page;
    3.14 @@ -999,11 +998,6 @@ int do_mmu_update(mmu_update_t *ureqs, i
    3.15      unsigned long prev_spfn = 0;
    3.16      l1_pgentry_t *prev_spl1e = 0;
    3.17  
    3.18 -    if ( unlikely( get_user(count, p_count) ) )
    3.19 -    {
    3.20 -	return -EFAULT;
    3.21 -    }
    3.22 -
    3.23      perfc_incrc(calls_to_mmu_update); 
    3.24      perfc_addc(num_page_updates, count);
    3.25  
    3.26 @@ -1160,8 +1154,8 @@ int do_mmu_update(mmu_update_t *ureqs, i
    3.27          percpu_info[cpu].gps = percpu_info[cpu].pts = NULL;
    3.28      }
    3.29  
    3.30 -    if ( unlikely(rc) )
    3.31 -	put_user( count, p_count );
    3.32 +    if ( unlikely(success_count != NULL) )
    3.33 +	put_user(count, success_count);
    3.34  
    3.35      return rc;
    3.36  }
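
With the new interface do_mmu_update() no longer reads its count from guest memory, and it only writes a completion count back when the caller supplies a non-NULL success_count, so callers that do not care simply pass NULL. No caller touched by this changeset requests the count, but a hypothetical guest-side caller that did could look like this (illustrative sketch; ptep0/ptep1 and newval0/newval1 are placeholders):

    /* Illustrative only: batch two PTE updates and ask how many succeeded. */
    mmu_update_t req[2];
    int done = 0;

    req[0].ptr = virt_to_machine(ptep0); req[0].val = newval0;
    req[1].ptr = virt_to_machine(ptep1); req[1].val = newval1;

    if ( unlikely(HYPERVISOR_mmu_update(req, 2, &done) < 0) )
        printk("mmu_update failed after %d of 2 updates\n", done);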
     4.1 --- a/xen/include/xen/mm.h	Wed May 19 10:16:44 2004 +0000
     4.2 +++ b/xen/include/xen/mm.h	Wed May 19 12:10:16 2004 +0000
     4.3 @@ -314,7 +314,7 @@ int check_descriptor(unsigned long a, un
     4.4  #define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
     4.5  
     4.6  /* Part of the domain API. */
     4.7 -int do_mmu_update(mmu_update_t *updates, int *count);
     4.8 +int do_mmu_update(mmu_update_t *updates, int count, int *success_count);
     4.9  
    4.10  #define DEFAULT_GDT_ENTRIES     ((LAST_RESERVED_GDT_ENTRY*8)+7)
    4.11  #define DEFAULT_GDT_ADDRESS     ((unsigned long)gdt_table)
     5.1 --- a/xenolinux-2.4.26-sparse/arch/xen/drivers/dom0/core.c	Wed May 19 10:16:44 2004 +0000
     5.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/drivers/dom0/core.c	Wed May 19 12:10:16 2004 +0000
     5.3 @@ -89,18 +89,18 @@ static int privcmd_ioctl(struct inode *i
     5.4  	    if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
     5.5  		return -EFAULT;
     5.6  	    
     5.7 -	    for (j=0;j<n;j++)
     5.8 +	    for ( j = 0; j < n; j++ )
     5.9  	    {
    5.10  		struct vm_area_struct *vma = 
    5.11  		    find_vma( current->mm, msg[j].va );
    5.12  
    5.13 -		if (!vma)
    5.14 +		if ( !vma )
    5.15  		    return -EINVAL;
    5.16  
    5.17 -		if (msg[j].va > PAGE_OFFSET)
    5.18 +		if ( msg[j].va > PAGE_OFFSET )
    5.19  		    return -EINVAL;
    5.20  
    5.21 -		if (msg[j].va + (msg[j].npages<<PAGE_SHIFT) > vma->vm_end)
    5.22 +		if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
    5.23  		    return -EINVAL;
    5.24  
    5.25  		if ( (rc = direct_remap_area_pages(vma->vm_mm, 
    5.26 @@ -108,7 +108,7 @@ static int privcmd_ioctl(struct inode *i
    5.27  					    msg[j].mfn<<PAGE_SHIFT, 
    5.28  					    msg[j].npages<<PAGE_SHIFT, 
    5.29  					    vma->vm_page_prot,
    5.30 -					    mmapcmd.dom)) <0)
    5.31 +					    mmapcmd.dom)) < 0 )
    5.32  		    return rc;
    5.33  	    }
    5.34  	}
    5.35 @@ -131,21 +131,15 @@ static int privcmd_ioctl(struct inode *i
    5.36  
    5.37  	vma = find_vma( current->mm, m.addr );
    5.38  
    5.39 -	if (!vma)
    5.40 +	if ( !vma )
    5.41  	{ ret = -EINVAL; goto batch_err; }
    5.42  
    5.43 -	if (m.addr > PAGE_OFFSET)
    5.44 +	if ( m.addr > PAGE_OFFSET )
    5.45  	{ ret = -EFAULT; goto batch_err; }
    5.46  
    5.47 -	if (m.addr + (m.num<<PAGE_SHIFT) > vma->vm_end)
    5.48 +	if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
    5.49  	{ ret = -EFAULT; goto batch_err; }
    5.50  
    5.51 -	// everything fits inside the vma
    5.52 -
    5.53 -//printk("direct_r_a_p sx=%ld address=%lx macaddr=%lx dom=%lld\n",size,address,machine_addr,domid);
    5.54 -//    memset( u, 0, sizeof(mmu_update_t)*MAX_DIRECTMAP_MMU_QUEUE );// XXX
    5.55 -
    5.56 -
    5.57  	if ( m.dom != 0 )
    5.58  	{
    5.59  	    u[0].val  = (unsigned long)(m.dom<<16) & ~0xFFFFUL;
    5.60 @@ -165,35 +159,28 @@ static int privcmd_ioctl(struct inode *i
    5.61  
    5.62  	p = m.arr;
    5.63  	addr = m.addr;
    5.64 -//printk("BATCH: arr=%p addr=%lx num=%d u=%p,w=%p\n",p,addr,m.num,u,w);
    5.65 -	for (i=0; i<m.num; i++, addr+=PAGE_SIZE, p++)
    5.66 +	for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
    5.67  	{
    5.68 -	    unsigned int count;
    5.69  	    if ( get_user(mfn, p) ) return -EFAULT;
    5.70  
    5.71  	    v->val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot) |
    5.72  		_PAGE_IO;
    5.73  
    5.74 -	    __direct_remap_area_pages( vma->vm_mm,
    5.75 -				       addr, 
    5.76 -				       PAGE_SIZE, 
    5.77 -				       v);
    5.78 -	    v++;
    5.79 -	    count = v-u;
    5.80 -//printk("Q i=%d mfn=%x co=%d v=%p : %lx %lx\n",i,mfn,count,v, w->val,w->ptr);
    5.81 +	    __direct_remap_area_pages(vma->vm_mm,
    5.82 +                                      addr, 
    5.83 +                                      PAGE_SIZE, 
    5.84 +                                      v);
    5.85  
    5.86 -	    if ( HYPERVISOR_mmu_update(u, &count) < 0 )
    5.87 -	    {
    5.88 -		//printk("Fail %d->%d mfn=%lx\n",v-u,count, w->val);
    5.89 +	    if ( unlikely(HYPERVISOR_mmu_update(u, v - u + 1, NULL) < 0) )
    5.90  		put_user( 0xe0000000 | mfn, p );
    5.91 -	    }
    5.92 -	    v=w;
    5.93 +
    5.94 +	    v = w;
    5.95  	}
    5.96  	ret = 0;
    5.97  	break;
    5.98  
    5.99      batch_err:
   5.100 -	printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%lx %lx-%lx\n", 
   5.101 +	printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n", 
   5.102  	       ret, vma, m.addr, m.num, m.arr, vma->vm_start, vma->vm_end);
   5.103  	break;
   5.104      }
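
Note how the privcmd batch path adapts to the new convention: the local 'count' variable is gone, the caller computes the request count itself (v - u + 1, since v still points at the entry just filled in), passes NULL because it does not need the success count, and flags a failed frame by writing 0xe0000000|mfn back into the user-supplied array. The per-frame pattern, condensed from the hunk above:

    /* One update per mapped frame; failures are reported back to the caller. */
    v->val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot) | _PAGE_IO;
    __direct_remap_area_pages(vma->vm_mm, addr, PAGE_SIZE, v);
    if ( unlikely(HYPERVISOR_mmu_update(u, v - u + 1, NULL) < 0) )
        put_user( 0xe0000000 | mfn, p );  /* mark this frame as failed */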
     6.1 --- a/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/main.c	Wed May 19 10:16:44 2004 +0000
     6.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/main.c	Wed May 19 12:10:16 2004 +0000
     6.3 @@ -231,6 +231,7 @@ static void net_rx_action(unsigned long 
     6.4          mcl[1].op = __HYPERVISOR_mmu_update;
     6.5          mcl[1].args[0] = (unsigned long)mmu;
     6.6          mcl[1].args[1] = 4;
     6.7 +        mcl[1].args[2] = 0;
     6.8  
     6.9          mmu += 4;
    6.10          mcl += 2;
     7.1 --- a/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/frontend/main.c	Wed May 19 10:16:44 2004 +0000
     7.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/frontend/main.c	Wed May 19 12:10:16 2004 +0000
     7.3 @@ -384,6 +384,7 @@ static int netif_poll(struct net_device 
     7.4          mcl->op = __HYPERVISOR_mmu_update;
     7.5          mcl->args[0] = (unsigned long)rx_mmu;
     7.6          mcl->args[1] = mmu - rx_mmu;
     7.7 +        mcl->args[2] = 0;
     7.8          mcl++;
     7.9          (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
    7.10      }
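
Both network drivers issue their mmu_update batches through a multicall, so the new third hypercall argument shows up as args[2]; they pass 0 because they do not want a success count. The queueing pattern, as in the frontend hunk above (mcl, rx_mmu and mmu as used there):

    mcl->op      = __HYPERVISOR_mmu_update;
    mcl->args[0] = (unsigned long)rx_mmu;   /* request array              */
    mcl->args[1] = mmu - rx_mmu;            /* number of queued updates   */
    mcl->args[2] = 0;                       /* success_count: not wanted  */
    mcl++;
    (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);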
     8.1 --- a/xenolinux-2.4.26-sparse/arch/xen/kernel/traps.c	Wed May 19 10:16:44 2004 +0000
     8.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/kernel/traps.c	Wed May 19 12:10:16 2004 +0000
     8.3 @@ -317,12 +317,11 @@ asmlinkage void do_general_protection(st
     8.4  		__asm__ __volatile__ ( "sldt %0" : "=r" (ldt) );
     8.5  		if ( ldt == 0 )
     8.6  		{
     8.7 -		    int count = 1;
     8.8  		    mmu_update_t u;
     8.9  		    u.ptr  = MMU_EXTENDED_COMMAND;
    8.10  		    u.ptr |= (unsigned long)&default_ldt[0];
    8.11  		    u.val  = MMUEXT_SET_LDT | (5 << MMUEXT_CMD_SHIFT);
    8.12 -		    if ( unlikely(HYPERVISOR_mmu_update(&u, &count) < 0) )
    8.13 +		    if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL) < 0) )
    8.14  		    {
    8.15  			show_trace(NULL);
    8.16  			panic("Failed to install default LDT");
    8.17 @@ -644,7 +643,7 @@ void __init trap_init(void)
    8.18   * don't set them to safe values on entry to the kernel). At *any* point Xen 
    8.19   * may be entered due to a hardware interrupt --- on exit from Xen an invalid 
    8.20   * FS/GS will cause our failsafe_callback to be executed. This could occur, 
    8.21 - * for example, while the mmmu_update_queue is in an inconsistent state. This
    8.22 + * for example, while the mmu_update_queue is in an inconsistent state. This
    8.23   * is disastrous because the normal page-fault handler touches the update
    8.24   * queue!
    8.25   * 
     9.1 --- a/xenolinux-2.4.26-sparse/arch/xen/mm/hypervisor.c	Wed May 19 10:16:44 2004 +0000
     9.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/mm/hypervisor.c	Wed May 19 12:10:16 2004 +0000
     9.3 @@ -40,13 +40,12 @@ static void DEBUG_allow_pt_reads(void)
     9.4      int i;
     9.5      for ( i = idx-1; i >= 0; i-- )
     9.6      {
     9.7 -	int count = 1;
     9.8          pte = update_debug_queue[i].ptep;
     9.9          if ( pte == NULL ) continue;
    9.10          update_debug_queue[i].ptep = NULL;
    9.11          update.ptr = virt_to_machine(pte);
    9.12          update.val = update_debug_queue[i].pteval;
    9.13 -        HYPERVISOR_mmu_update(&update, &count);
    9.14 +        HYPERVISOR_mmu_update(&update, 1, NULL);
    9.15      }
    9.16  }
    9.17  static void DEBUG_disallow_pt_read(unsigned long va)
    9.18 @@ -55,7 +54,6 @@ static void DEBUG_disallow_pt_read(unsig
    9.19      pmd_t *pmd;
    9.20      pgd_t *pgd;
    9.21      unsigned long pteval;
    9.22 -    int count = 1;
    9.23      /*
    9.24       * We may fault because of an already outstanding update.
    9.25       * That's okay -- it'll get fixed up in the fault handler.
    9.26 @@ -67,7 +65,7 @@ static void DEBUG_disallow_pt_read(unsig
    9.27      update.ptr = virt_to_machine(pte);
    9.28      pteval = *(unsigned long *)pte;
    9.29      update.val = pteval & ~_PAGE_PRESENT;
    9.30 -    HYPERVISOR_mmu_update(&update, &count);
    9.31 +    HYPERVISOR_mmu_update(&update, 1, NULL);
    9.32      update_debug_queue[idx].ptep = pte;
    9.33      update_debug_queue[idx].pteval = pteval;
    9.34  }
    9.35 @@ -103,9 +101,10 @@ void MULTICALL_flush_page_update_queue(v
    9.36  #endif
    9.37          idx = 0;
    9.38          wmb(); /* Make sure index is cleared first to avoid double updates. */
    9.39 -        queue_multicall2(__HYPERVISOR_mmu_update, 
    9.40 +        queue_multicall3(__HYPERVISOR_mmu_update, 
    9.41                           (unsigned long)update_queue, 
    9.42 -                         &_idx);
    9.43 +                         (unsigned long)_idx, 
    9.44 +                         (unsigned long)NULL);
    9.45      }
    9.46      spin_unlock_irqrestore(&update_lock, flags);
    9.47  }
    9.48 @@ -121,7 +120,7 @@ static inline void __flush_page_update_q
    9.49  #endif
    9.50      idx = 0;
    9.51      wmb(); /* Make sure index is cleared first to avoid double updates. */
    9.52 -    if ( unlikely(HYPERVISOR_mmu_update(update_queue, &_idx) < 0) )
    9.53 +    if ( unlikely(HYPERVISOR_mmu_update(update_queue, _idx, NULL) < 0) )
    9.54          panic("Failed to execute MMU updates");
    9.55  }
    9.56  
    10.1 --- a/xenolinux-2.4.26-sparse/arch/xen/mm/ioremap.c	Wed May 19 10:16:44 2004 +0000
    10.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/mm/ioremap.c	Wed May 19 12:10:16 2004 +0000
    10.3 @@ -42,18 +42,11 @@ static inline void direct_remap_area_pte
    10.4          BUG();
    10.5  
    10.6      do {
    10.7 -#if 0 // XXX
    10.8 -        if (!pte_none(*pte)) {
    10.9 -            printk("direct_remap_area_pte: page already exists\n");
   10.10 -            BUG();
   10.11 -        }
   10.12 -#endif
   10.13          (*v)->ptr = virt_to_machine(pte);
   10.14          (*v)++;
   10.15          address += PAGE_SIZE;
   10.16          pte++;
   10.17      } while (address && (address < end));
   10.18 -    return ;
   10.19  }
   10.20  
   10.21  static inline int direct_remap_area_pmd(struct mm_struct *mm,
   10.22 @@ -71,7 +64,7 @@ static inline int direct_remap_area_pmd(
   10.23      if (address >= end)
   10.24          BUG();
   10.25      do {
   10.26 -        pte_t * pte = pte_alloc(mm, pmd, address);
   10.27 +        pte_t *pte = pte_alloc(mm, pmd, address);
   10.28          if (!pte)
   10.29              return -ENOMEM;
   10.30          direct_remap_area_pte(pte, address, end - address, v);
   10.31 @@ -117,7 +110,7 @@ int direct_remap_area_pages(struct mm_st
   10.32                              pgprot_t prot,
   10.33                              domid_t  domid)
   10.34  {
   10.35 -    int i, count;
   10.36 +    int i;
   10.37      unsigned long start_address;
   10.38  #define MAX_DIRECTMAP_MMU_QUEUE 130
   10.39      mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
   10.40 @@ -141,39 +134,42 @@ int direct_remap_area_pages(struct mm_st
   10.41  
   10.42      start_address = address;
   10.43  
   10.44 -    for(i=0; i<size; 
   10.45 -	i+=PAGE_SIZE, machine_addr+=PAGE_SIZE, address+=PAGE_SIZE, v++)
   10.46 +    for( i = 0; i < size; i += PAGE_SIZE )
   10.47      {
   10.48 -	if( (v-u) == MAX_DIRECTMAP_MMU_QUEUE )
   10.49 +	if ( (v - u) == MAX_DIRECTMAP_MMU_QUEUE )
   10.50  	{
   10.51 -	    /* get the ptep's filled in */
   10.52 +	    /* Fill in the PTE pointers. */
   10.53  	    __direct_remap_area_pages( mm,
   10.54  				       start_address, 
   10.55  				       address-start_address, 
   10.56  				       w);
   10.57  	    
   10.58 -	    count = v-u;
   10.59 -	    if ( HYPERVISOR_mmu_update(u, &count) < 0 )
   10.60 +	    if ( HYPERVISOR_mmu_update(u, v - u, NULL) < 0 )
   10.61  		return -EFAULT;	    
   10.62 -	    v=w;
   10.63 +	    v = w;
   10.64  	    start_address = address;
   10.65  	}
   10.66  
   10.67 -	/* fill in the machine addresses */
   10.68 +	/*
   10.69 +         * Fill in the machine address: PTE ptr is done later by
   10.70 +         * __direct_remap_area_pages(). 
   10.71 +         */
   10.72          v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot) | _PAGE_IO;
   10.73 +
   10.74 +        machine_addr += PAGE_SIZE;
   10.75 +        address += PAGE_SIZE; 
   10.76 +        v++;
   10.77      }
   10.78  
   10.79 -    if(v!=w)
   10.80 +    if ( v != w )
   10.81      {
   10.82  	/* get the ptep's filled in */
   10.83 -	__direct_remap_area_pages( mm,
   10.84 -				   start_address, 
   10.85 -				   address-start_address, 
   10.86 -				   w);	 
   10.87 -	count = v-u;
   10.88 -	if ( HYPERVISOR_mmu_update(u, &count) < 0 )
   10.89 +	__direct_remap_area_pages(mm,
   10.90 +                                  start_address, 
   10.91 +                                  address-start_address, 
   10.92 +                                  w);	 
   10.93 +	if ( unlikely(HYPERVISOR_mmu_update(u, v - u, NULL) < 0) )
   10.94  	    return -EFAULT;	    
   10.95 -
   10.96      }
   10.97      
   10.98      return 0;
    11.1 --- a/xenolinux-2.4.26-sparse/include/asm-xen/hypervisor.h	Wed May 19 10:16:44 2004 +0000
    11.2 +++ b/xenolinux-2.4.26-sparse/include/asm-xen/hypervisor.h	Wed May 19 12:10:16 2004 +0000
    11.3 @@ -160,13 +160,15 @@ static inline int HYPERVISOR_set_trap_ta
    11.4      return ret;
    11.5  }
    11.6  
    11.7 -static inline int HYPERVISOR_mmu_update(mmu_update_t *req, int *count)
    11.8 +static inline int HYPERVISOR_mmu_update(mmu_update_t *req, 
    11.9 +                                        int count, 
   11.10 +                                        int *success_count)
   11.11  {
   11.12      int ret;
   11.13      __asm__ __volatile__ (
   11.14          TRAP_INSTR
   11.15          : "=a" (ret) : "0" (__HYPERVISOR_mmu_update), 
   11.16 -        "b" (req), "c" (count) : "memory" );
   11.17 +        "b" (req), "c" (count), "d" (success_count) : "memory" );
   11.18  
   11.19      return ret;
   11.20  }