ia64/xen-unstable

changeset 8979:3dde68484683

Fix netfront receive path for auto_translated_physmap mode.

Signed-off-by: Steven Smith <sos22@cam.ac.uk>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Feb 23 15:22:12 2006 +0100 (2006-02-23)
parents 066ac36725f3
children b883ef5fad48
files linux-2.6-xen-sparse/drivers/xen/core/gnttab.c linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c linux-2.6-xen-sparse/include/xen/gnttab.h
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c	Thu Feb 23 14:50:00 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c	Thu Feb 23 15:22:12 2006 +0100
     1.3 @@ -222,25 +222,22 @@ gnttab_end_foreign_access(grant_ref_t re
     1.4  }
     1.5  
     1.6  int
     1.7 -gnttab_grant_foreign_transfer(domid_t domid)
     1.8 +gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
     1.9  {
    1.10  	int ref;
    1.11  
    1.12  	if (unlikely((ref = get_free_entry()) == -1))
    1.13  		return -ENOSPC;
    1.14 -
    1.15 -	shared[ref].frame = 0;
    1.16 -	shared[ref].domid = domid;
    1.17 -	wmb();
    1.18 -	shared[ref].flags = GTF_accept_transfer;
    1.19 +	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
    1.20  
    1.21  	return ref;
    1.22  }
    1.23  
    1.24  void
    1.25 -gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid)
    1.26 +gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
    1.27 +				  unsigned long pfn)
    1.28  {
    1.29 -	shared[ref].frame = 0;
    1.30 +	shared[ref].frame = pfn;
    1.31  	shared[ref].domid = domid;
    1.32  	wmb();
    1.33  	shared[ref].flags = GTF_accept_transfer;
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Thu Feb 23 14:50:00 2006 +0100
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Thu Feb 23 15:22:12 2006 +0100
     2.3 @@ -587,25 +587,23 @@ static void network_alloc_rx_buffers(str
     2.4  		BUG_ON((signed short)ref < 0);
     2.5  		np->grant_rx_ref[id] = ref;
     2.6  		gnttab_grant_foreign_transfer_ref(ref,
     2.7 -						  np->xbdev->otherend_id);
     2.8 +						  np->xbdev->otherend_id,
     2.9 +						  __pa(skb->head) >> PAGE_SHIFT);
    2.10  		RING_GET_REQUEST(&np->rx, req_prod + i)->gref = ref;
    2.11  		rx_pfn_array[i] = virt_to_mfn(skb->head);
    2.12  
    2.13 -		/* Remove this page from map before passing back to Xen. */
    2.14 -		set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
    2.15 -				    INVALID_P2M_ENTRY);
    2.16 -
    2.17 -		MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head,
    2.18 -					__pte(0), 0);
    2.19 +		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
    2.20 +			/* Remove this page before passing back to Xen. */
    2.21 +			set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
    2.22 +					    INVALID_P2M_ENTRY);
    2.23 +			MULTI_update_va_mapping(rx_mcl+i,
    2.24 +						(unsigned long)skb->head,
    2.25 +						__pte(0), 0);
    2.26 +		}
    2.27  	}
    2.28  
    2.29 -	/* After all PTEs have been zapped we blow away stale TLB entries. */
    2.30 -	rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
    2.31 -
    2.32 -	/* Give away a batch of pages. */
    2.33 -	rx_mcl[i].op = __HYPERVISOR_memory_op;
    2.34 -	rx_mcl[i].args[0] = XENMEM_decrease_reservation;
    2.35 -	rx_mcl[i].args[1] = (unsigned long)&reservation;
    2.36 +	/* Tell the balloon driver what is going on. */
    2.37 +	balloon_update_driver_allowance(i);
    2.38  
    2.39  	reservation.extent_start = rx_pfn_array;
    2.40  	reservation.nr_extents   = i;
    2.41 @@ -613,15 +611,27 @@ static void network_alloc_rx_buffers(str
    2.42  	reservation.address_bits = 0;
    2.43  	reservation.domid        = DOMID_SELF;
    2.44  
    2.45 -	/* Tell the ballon driver what is going on. */
    2.46 -	balloon_update_driver_allowance(i);
    2.47 +	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
    2.48 +		/* After all PTEs have been zapped, flush the TLB. */
    2.49 +		rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
    2.50 +			UVMF_TLB_FLUSH|UVMF_ALL;
    2.51  
    2.52 -	/* Zap PTEs and give away pages in one big multicall. */
    2.53 -	(void)HYPERVISOR_multicall(rx_mcl, i+1);
    2.54 +		/* Give away a batch of pages. */
    2.55 +		rx_mcl[i].op = __HYPERVISOR_memory_op;
    2.56 +		rx_mcl[i].args[0] = XENMEM_decrease_reservation;
    2.57 +		rx_mcl[i].args[1] = (unsigned long)&reservation;
    2.58  
    2.59 -	/* Check return status of HYPERVISOR_memory_op(). */
    2.60 -	if (unlikely(rx_mcl[i].result != i))
    2.61 -		panic("Unable to reduce memory reservation\n");
    2.62 +		/* Zap PTEs and give away pages in one big multicall. */
    2.63 +		(void)HYPERVISOR_multicall(rx_mcl, i+1);
    2.64 +
    2.65 +		/* Check return status of HYPERVISOR_memory_op(). */
    2.66 +		if (unlikely(rx_mcl[i].result != i))
    2.67 +			panic("Unable to reduce memory reservation\n");
    2.68 +	} else {
    2.69 +		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
    2.70 +					 &reservation) != i)
    2.71 +			panic("Unable to reduce memory reservation\n");
    2.72 +	}
    2.73  
    2.74  	/* Above is a suitable barrier to ensure backend will see requests. */
    2.75  	np->rx.req_prod_pvt = req_prod + i;
    2.76 @@ -802,17 +812,19 @@ static int netif_poll(struct net_device 
    2.77  		np->stats.rx_packets++;
    2.78  		np->stats.rx_bytes += rx->status;
    2.79  
    2.80 -		/* Remap the page. */
    2.81 -		MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
    2.82 -					pfn_pte_ma(mfn, PAGE_KERNEL), 0);
    2.83 -		mcl++;
    2.84  		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
    2.85 +			/* Remap the page. */
    2.86 +			MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
    2.87 +						pfn_pte_ma(mfn, PAGE_KERNEL),
    2.88 +						0);
    2.89 +			mcl++;
    2.90  			mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
    2.91  				| MMU_MACHPHYS_UPDATE;
    2.92  			mmu->val = __pa(skb->head) >> PAGE_SHIFT;
    2.93  			mmu++;
    2.94  
    2.95 -			set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT, mfn);
    2.96 +			set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
    2.97 +					    mfn);
    2.98  		}
    2.99  
   2.100  		__skb_queue_tail(&rxq, skb);
   2.101 @@ -1003,7 +1015,8 @@ static void network_connect(struct net_d
   2.102  		if ((unsigned long)np->rx_skbs[i] < __PAGE_OFFSET)
   2.103  			continue;
   2.104  		gnttab_grant_foreign_transfer_ref(
   2.105 -			np->grant_rx_ref[i], np->xbdev->otherend_id);
   2.106 +			np->grant_rx_ref[i], np->xbdev->otherend_id,
   2.107 +			__pa(np->rx_skbs[i]->data) >> PAGE_SHIFT);
   2.108  		RING_GET_REQUEST(&np->rx, requeue_idx)->gref =
   2.109  			np->grant_rx_ref[i];
   2.110  		RING_GET_REQUEST(&np->rx, requeue_idx)->id = i;
     3.1 --- a/linux-2.6-xen-sparse/include/xen/gnttab.h	Thu Feb 23 14:50:00 2006 +0100
     3.2 +++ b/linux-2.6-xen-sparse/include/xen/gnttab.h	Thu Feb 23 15:22:12 2006 +0100
     3.3 @@ -71,7 +71,7 @@ int gnttab_end_foreign_access_ref(grant_
     3.4  void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
     3.5  			       unsigned long page);
     3.6  
     3.7 -int gnttab_grant_foreign_transfer(domid_t domid);
     3.8 +int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
     3.9  
    3.10  unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
    3.11  unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
    3.12 @@ -98,7 +98,8 @@ void gnttab_request_free_callback(struct
    3.13  void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
    3.14  				     unsigned long frame, int readonly);
    3.15  
    3.16 -void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid);
    3.17 +void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
    3.18 +				       unsigned long pfn);
    3.19  
    3.20  #ifdef __ia64__
    3.21  #define gnttab_map_vaddr(map) __va(map.dev_bus_addr)