ia64/xen-unstable

changeset 9904:7997d8f16240

Change the semantics of grant transfers for vp (auto-translated
physmap) guests so that, as Keir suggested, the operation automatically
leaves a fresh page at the same pseudo-physical address.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author kaf24@firebug.cl.cam.ac.uk
date Mon May 01 15:28:01 2006 +0100 (2006-05-01)
parents a19cc748469e
children f06be4aeb1f1
files linux-2.6-xen-sparse/drivers/xen/netback/netback.c
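
The hunks below all follow from one idea: when the backend domain runs with
XENFEAT_auto_translated_physmap (the ia64 "vp" case), the revised
GNTTABOP_transfer semantics leave a fresh page behind at the same
pseudo-physical address, so netback no longer has to allocate a replacement
MFN, patch the P2M table, or remap the kernel virtual address itself. A
condensed per-skb sketch of the resulting setup logic follows; it is a
reading aid, not a literal excerpt. The function name prepare_rx_transfer,
the cursor-pointer parameters, and the -ENOMEM error path are assumptions
introduced here for illustration; alloc_mfn(), set_phys_to_machine(),
MULTI_update_va_mapping(), pfn_pte_ma() and the gnttab_transfer fields are
as used in the diff.

	/*
	 * Sketch of the per-skb RX setup after this change (not the
	 * literal driver code; batching and back-off are handled by
	 * the caller).
	 */
	static int prepare_rx_transfer(netif_t *netif, struct sk_buff *skb,
				       gnttab_transfer_t *gop,
				       multicall_entry_t **mcl,
				       mmu_update_t **mmu)
	{
		unsigned long vdata   = (unsigned long)skb->data;
		unsigned long old_mfn = virt_to_mfn(vdata);
		unsigned long new_mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/*
			 * PV backend: it must replace the page it is about
			 * to give away, so allocate a fresh MFN first.
			 */
			if ((new_mfn = alloc_mfn()) == 0)
				return -ENOMEM;	/* caller re-queues the skb */

			/* New P2M entry before the old page is reassigned. */
			set_phys_to_machine(__pa(skb->data) >> PAGE_SHIFT,
					    new_mfn);

			/* Remap the kernel VA onto the replacement MFN... */
			MULTI_update_va_mapping(*mcl, vdata,
						pfn_pte_ma(new_mfn,
							   PAGE_KERNEL), 0);
			(*mcl)++;

			/* ...and queue the machine-to-physical update. */
			(*mmu)->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
				      MMU_MACHPHYS_UPDATE;
			(*mmu)->val = __pa(vdata) >> PAGE_SHIFT;
			(*mmu)++;
		}
		/*
		 * Auto-translated ("vp") backend: none of the above is
		 * needed; the grant transfer itself now installs a fresh
		 * page at this pseudo-physical address.
		 */

		gop->mfn   = old_mfn;
		gop->domid = netif->domid;
		/* The grant reference from the frontend's RX request is
		 * filled in as in the full driver (omitted here). */
		return 0;
	}
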
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Sun Apr 30 09:52:59 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Mon May 01 15:28:01 2006 +0100
     1.3 @@ -235,23 +235,35 @@ static void net_rx_action(unsigned long 
     1.4  		vdata   = (unsigned long)skb->data;
     1.5  		old_mfn = virt_to_mfn(vdata);
     1.6  
     1.7 -		/* Memory squeeze? Back off for an arbitrary while. */
     1.8 -		if ((new_mfn = alloc_mfn()) == 0) {
     1.9 -			if ( net_ratelimit() )
    1.10 -				WPRINTK("Memory squeeze in netback driver.\n");
    1.11 -			mod_timer(&net_timer, jiffies + HZ);
    1.12 -			skb_queue_head(&rx_queue, skb);
    1.13 -			break;
    1.14 +		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
    1.15 +			/* Memory squeeze? Back off for an arbitrary while. */
    1.16 +			if ((new_mfn = alloc_mfn()) == 0) {
    1.17 +				if ( net_ratelimit() )
    1.18 +					WPRINTK("Memory squeeze in netback "
    1.19 +						"driver.\n");
    1.20 +				mod_timer(&net_timer, jiffies + HZ);
    1.21 +				skb_queue_head(&rx_queue, skb);
    1.22 +				break;
    1.23 +			}
    1.24 +			/*
    1.25 +			 * Set the new P2M table entry before reassigning
    1.26 +			 * the old data page. Heed the comment in
    1.27 +			 * pgtable-2level.h:pte_page(). :-)
    1.28 +			 */
    1.29 +			set_phys_to_machine(
    1.30 +				__pa(skb->data) >> PAGE_SHIFT,
    1.31 +				new_mfn);
    1.32 +
    1.33 +			MULTI_update_va_mapping(mcl, vdata,
    1.34 +						pfn_pte_ma(new_mfn,
    1.35 +							   PAGE_KERNEL), 0);
    1.36 +			mcl++;
    1.37 +
    1.38 +			mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
    1.39 +				MMU_MACHPHYS_UPDATE;
    1.40 +			mmu->val = __pa(vdata) >> PAGE_SHIFT;
    1.41 +			mmu++;
    1.42  		}
    1.43 -		/*
    1.44 -		 * Set the new P2M table entry before reassigning the old data
    1.45 -		 * page. Heed the comment in pgtable-2level.h:pte_page(). :-)
    1.46 -		 */
    1.47 -		set_phys_to_machine(__pa(skb->data) >> PAGE_SHIFT, new_mfn);
    1.48 -
    1.49 -		MULTI_update_va_mapping(mcl, vdata,
    1.50 -					pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
    1.51 -		mcl++;
    1.52  
    1.53  		gop->mfn = old_mfn;
    1.54  		gop->domid = netif->domid;
    1.55 @@ -260,13 +272,6 @@ static void net_rx_action(unsigned long 
    1.56  		netif->rx.req_cons++;
    1.57  		gop++;
    1.58  
    1.59 -		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
    1.60 -			mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
    1.61 -				MMU_MACHPHYS_UPDATE;
    1.62 -			mmu->val = __pa(vdata) >> PAGE_SHIFT;
    1.63 -			mmu++;
    1.64 -		}
    1.65 -
    1.66  		__skb_queue_tail(&rxq, skb);
    1.67  
    1.68  		/* Filled the batch queue? */
    1.69 @@ -274,23 +279,25 @@ static void net_rx_action(unsigned long 
    1.70  			break;
    1.71  	}
    1.72  
    1.73 -	if (mcl == rx_mcl)
    1.74 -		return;
    1.75 -
    1.76 -	mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
    1.77 +	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
    1.78 +		if (mcl == rx_mcl)
    1.79 +			return;
    1.80  
    1.81 -	if (mmu - rx_mmu) {
    1.82 -		mcl->op = __HYPERVISOR_mmu_update;
    1.83 -		mcl->args[0] = (unsigned long)rx_mmu;
    1.84 -		mcl->args[1] = mmu - rx_mmu;
    1.85 -		mcl->args[2] = 0;
    1.86 -		mcl->args[3] = DOMID_SELF;
    1.87 -		mcl++;
    1.88 +		mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
    1.89 +
    1.90 +		if (mmu - rx_mmu) {
    1.91 +			mcl->op = __HYPERVISOR_mmu_update;
    1.92 +			mcl->args[0] = (unsigned long)rx_mmu;
    1.93 +			mcl->args[1] = mmu - rx_mmu;
    1.94 +			mcl->args[2] = 0;
    1.95 +			mcl->args[3] = DOMID_SELF;
    1.96 +			mcl++;
    1.97 +		}
    1.98 +
    1.99 +		ret = HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
   1.100 +		BUG_ON(ret != 0);
   1.101  	}
   1.102  
   1.103 -	ret = HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
   1.104 -	BUG_ON(ret != 0);
   1.105 -
   1.106  	ret = HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 
   1.107  					gop - grant_rx_op);
   1.108  	BUG_ON(ret != 0);
   1.109 @@ -308,8 +315,11 @@ static void net_rx_action(unsigned long 
   1.110  		netif->stats.tx_bytes += size;
   1.111  		netif->stats.tx_packets++;
   1.112  
   1.113 -		/* The update_va_mapping() must not fail. */
   1.114 -		BUG_ON(mcl->result != 0);
   1.115 +		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
   1.116 +			/* The update_va_mapping() must not fail. */
   1.117 +			BUG_ON(mcl->result != 0);
   1.118 +			mcl++;
   1.119 +		}
   1.120  
   1.121  		/* Check the reassignment error code. */
   1.122  		status = NETIF_RSP_OKAY;
   1.123 @@ -340,7 +350,6 @@ static void net_rx_action(unsigned long 
   1.124  
   1.125  		netif_put(netif);
   1.126  		dev_kfree_skb(skb);
   1.127 -		mcl++;
   1.128  		gop++;
   1.129  	}
   1.130
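
For context, here is the flush side of the same change, condensed from the
second and third hunks above into straight-line form: the VA-remap/MMU
multicall is built and issued only for a non-translated backend, while
GNTTABOP_transfer is issued either way and, for a vp backend, is now the
only step needed. This sketch assumes the arrays rx_mcl, rx_mmu,
grant_rx_op and the cursors mcl, mmu, gop exactly as declared in
netback.c, and omits the response/notification loop that follows (where,
as the last hunks show, the BUG_ON(mcl->result) check and the mcl++
advance are likewise confined to the non-translated case).

	/*
	 * Tail of net_rx_action() after batching, as reorganised by this
	 * changeset (sketch).  The early return mirrors the diff: a PV
	 * backend with nothing queued has nothing to transfer either.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		if (mcl == rx_mcl)
			return;

		/* One TLB flush covers all the VA remaps queued above. */
		mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH | UVMF_ALL;

		if (mmu != rx_mmu) {
			/* Append the batched machine-to-physical updates. */
			mcl->op      = __HYPERVISOR_mmu_update;
			mcl->args[0] = (unsigned long)rx_mmu;
			mcl->args[1] = mmu - rx_mmu;
			mcl->args[2] = 0;
			mcl->args[3] = DOMID_SELF;
			mcl++;
		}

		ret = HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
		BUG_ON(ret != 0);
	}

	/*
	 * Both PV and auto-translated backends hand the old pages over
	 * here; for the auto-translated case this single hypercall now
	 * does all the work of replacing them.
	 */
	ret = HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op,
					gop - grant_rx_op);
	BUG_ON(ret != 0);
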