direct-io.hg

changeset 11144:c757ebffd500

[NET] back: Support copying packets to the frontend on receive path.
Signed-off-by: Steven Smith <ssmith@xensource.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Aug 16 16:01:00 2006 +0100 (2006-08-16)
parents e2e7f4c17b77
children 5233c4b076b9
files linux-2.6-xen-sparse/drivers/xen/netback/common.h linux-2.6-xen-sparse/drivers/xen/netback/netback.c linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypervisor.h
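In short: until now netback always flipped (GNTTABOP_transfer) each
received page into the frontend; with this change a frontend can instead
ask for packet data to be copied (GNTTABOP_copy) into buffers it has
granted. A minimal sketch of the per-fragment decision, condensed from
the netbk_gop_frag() hunk below (illustrative, not literal tree code):

	if (netif->copying_receiver) {
		/* Copy into the buffer granted by the frontend. */
		gnttab_copy_t *op = npo->copy + npo->copy_prod++;
		op->source.domid  = DOMID_SELF;
		op->source.u.gmfn = old_mfn;
		op->source.offset = offset;	/* data need not be page-aligned */
		op->dest.domid    = netif->domid;
		op->dest.u.ref    = req->gref;	/* frontend's receive buffer */
		op->dest.offset   = 0;
		op->len           = size;
		op->flags         = GNTCOPY_dest_gref;
	} else {
		/* Flip: transfer ownership of the whole page. */
		gnttab_transfer_t *op = npo->trans + npo->trans_prod++;
		op->mfn   = old_mfn;
		op->domid = netif->domid;
		op->ref   = req->gref;
	}

Copying avoids the P2M/MMU juggling that page flipping requires and
leaves the backend owning its pages, at the cost of a hypervisor-side
data copy.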
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Wed Aug 16 14:27:30 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Wed Aug 16 16:01:00 2006 +0100
     1.3 @@ -78,7 +78,10 @@ typedef struct netif_st {
     1.4  
     1.5  	/* Set of features that can be turned on in dev->features. */
     1.6  	int features;
     1.7 -	int can_queue;
     1.8 +
     1.9 +	/* Internal feature information. */
    1.10 +	int can_queue:1;	/* can queue packets for receiver? */
    1.11 +	int copying_receiver:1;	/* copy packets to receiver?       */
    1.12  
    1.13  	/* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
    1.14  	RING_IDX rx_req_cons_peek;
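(An aside on the two flags above, illustrative rather than part of the
patch: can_queue and copying_receiver are signed 1-bit bitfields, so
assigning 1 actually stores -1 on gcc. That value is still true in
boolean context, which is all the driver relies on:

	struct example { int flag:1; };		/* hypothetical */
	struct example e = { .flag = 1 };	/* stored value is -1 */
	if (e.flag)				/* branch taken: -1 is true */
		printk("flag set\n");

Unsigned bitfields would avoid the surprise, but the code is correct
as written.)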
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Wed Aug 16 14:27:30 2006 +0100
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Wed Aug 16 16:01:00 2006 +0100
     2.3 @@ -43,6 +43,7 @@
     2.4  struct netbk_rx_meta {
     2.5  	skb_frag_t frag;
     2.6  	int id;
     2.7 +	int copy:1;
     2.8  };
     2.9  
    2.10  static void netif_idx_release(u16 pending_idx);
    2.11 @@ -72,6 +73,8 @@ static struct sk_buff_head rx_queue;
    2.12  static unsigned long mmap_vstart;
    2.13  #define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
    2.14  
    2.15 +static void *rx_mmap_area;
    2.16 +
    2.17  #define PKT_PROT_LEN 64
    2.18  
    2.19  static struct {
    2.20 @@ -277,12 +280,11 @@ int netif_be_start_xmit(struct sk_buff *
    2.21  		goto drop;
    2.22  	}
    2.23  
    2.24 -	/*
    2.25 -	 * We do not copy the packet unless:
    2.26 -	 *  1. The data -- including any in fragments -- is shared; or
    2.27 -	 *  2. The data is not allocated from our special cache.
    2.28 -	 */
    2.29 -	if (!is_flippable_skb(skb)) {
     2.30 +	/* Copy the packet here if it is destined for a flipping
     2.31 +	 * interface but is not flippable (e.g. there are extra
     2.32 +	 * references to the data).
     2.33 +	 */
    2.34 +	if (!netif->copying_receiver && !is_flippable_skb(skb)) {
    2.35  		struct sk_buff *nskb = netbk_copy_skb(skb);
    2.36  		if ( unlikely(nskb == NULL) )
    2.37  			goto drop;
    2.38 @@ -340,49 +342,74 @@ struct netrx_pending_operations {
    2.39  	unsigned trans_prod, trans_cons;
    2.40  	unsigned mmu_prod, mmu_cons;
    2.41  	unsigned mcl_prod, mcl_cons;
    2.42 +	unsigned copy_prod, copy_cons;
    2.43  	unsigned meta_prod, meta_cons;
    2.44  	mmu_update_t *mmu;
    2.45  	gnttab_transfer_t *trans;
    2.46 +	gnttab_copy_t *copy;
    2.47  	multicall_entry_t *mcl;
    2.48  	struct netbk_rx_meta *meta;
    2.49  };
    2.50  
    2.51 -static u16 netbk_gop_frag(netif_t *netif, struct page *page,
    2.52 -			  int i, struct netrx_pending_operations *npo)
     2.53 +/* Set up the grant operations for this fragment.  For a flipping
     2.54 +   interface, the P2M/MMU updates are also set up from here. */
    2.55 +static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
    2.56 +			  int i, struct netrx_pending_operations *npo,
    2.57 +			  struct page *page, unsigned long size,
    2.58 +			  unsigned long offset)
    2.59  {
    2.60  	mmu_update_t *mmu;
    2.61  	gnttab_transfer_t *gop;
    2.62 +	gnttab_copy_t *copy_gop;
    2.63  	multicall_entry_t *mcl;
    2.64  	netif_rx_request_t *req;
    2.65  	unsigned long old_mfn, new_mfn;
    2.66  
    2.67  	old_mfn = virt_to_mfn(page_address(page));
    2.68  
    2.69 -	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
    2.70 -		new_mfn = alloc_mfn();
    2.71 -
    2.72 -		/*
    2.73 -		 * Set the new P2M table entry before reassigning
    2.74 -		 * the old data page. Heed the comment in
    2.75 -		 * pgtable-2level.h:pte_page(). :-)
    2.76 -		 */
    2.77 -		set_phys_to_machine(page_to_pfn(page), new_mfn);
    2.78 +	req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
    2.79 +	if (netif->copying_receiver) {
    2.80 +		/* The fragment needs to be copied rather than
    2.81 +		   flipped. */
    2.82 +		meta->copy = 1;
    2.83 +		copy_gop = npo->copy + npo->copy_prod++;
    2.84 +		copy_gop->source.domid = DOMID_SELF;
    2.85 +		copy_gop->source.offset = offset;
    2.86 +		copy_gop->source.u.gmfn = old_mfn;
    2.87 +		copy_gop->dest.domid = netif->domid;
    2.88 +		copy_gop->dest.offset = 0;
    2.89 +		copy_gop->dest.u.ref = req->gref;
    2.90 +		copy_gop->len = size;
    2.91 +		copy_gop->flags = GNTCOPY_dest_gref;
    2.92 +	} else {
    2.93 +		meta->copy = 0;
    2.94 +		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
    2.95 +			new_mfn = alloc_mfn();
    2.96  
    2.97 -		mcl = npo->mcl + npo->mcl_prod++;
    2.98 -		MULTI_update_va_mapping(mcl, (unsigned long)page_address(page),
    2.99 -					pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
   2.100 +			/*
   2.101 +			 * Set the new P2M table entry before
   2.102 +			 * reassigning the old data page. Heed the
   2.103 +			 * comment in pgtable-2level.h:pte_page(). :-)
   2.104 +			 */
   2.105 +			set_phys_to_machine(page_to_pfn(page), new_mfn);
   2.106  
   2.107 -		mmu = npo->mmu + npo->mmu_prod++;
   2.108 -		mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
   2.109 -			MMU_MACHPHYS_UPDATE;
   2.110 -		mmu->val = page_to_pfn(page);
   2.111 -	}
   2.112 +			mcl = npo->mcl + npo->mcl_prod++;
   2.113 +			MULTI_update_va_mapping(mcl,
   2.114 +					     (unsigned long)page_address(page),
   2.115 +					     pfn_pte_ma(new_mfn, PAGE_KERNEL),
   2.116 +					     0);
   2.117  
   2.118 -	req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
   2.119 -	gop = npo->trans + npo->trans_prod++;
   2.120 -	gop->mfn = old_mfn;
   2.121 -	gop->domid = netif->domid;
   2.122 -	gop->ref = req->gref;
   2.123 +			mmu = npo->mmu + npo->mmu_prod++;
   2.124 +			mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
   2.125 +				MMU_MACHPHYS_UPDATE;
   2.126 +			mmu->val = page_to_pfn(page);
   2.127 +		}
   2.128 +
   2.129 +		gop = npo->trans + npo->trans_prod++;
   2.130 +		gop->mfn = old_mfn;
   2.131 +		gop->domid = netif->domid;
   2.132 +		gop->ref = req->gref;
   2.133 +	}
   2.134  	return req->id;
   2.135  }
   2.136  
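(For context: inside net_rx_action() these copy descriptors are batched
through a multicall, as the later hunks show; issued on their own they
would go straight to the hypervisor. A sketch under that assumption:

	/* Hand the accumulated copy operations to Xen synchronously. */
	ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, npo->copy,
					npo->copy_prod);
	BUG_ON(ret != 0);

Each gnttab_copy_t carries its own status field, checked per fragment
in netbk_check_gop() below.)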
   2.137 @@ -403,18 +430,21 @@ static void netbk_gop_skb(struct sk_buff
   2.138  	for (i = 0; i < nr_frags; i++) {
   2.139  		meta = npo->meta + npo->meta_prod++;
   2.140  		meta->frag = skb_shinfo(skb)->frags[i];
   2.141 -		meta->id = netbk_gop_frag(netif, meta->frag.page,
   2.142 -					  i + extra, npo);
   2.143 +		meta->id = netbk_gop_frag(netif, meta, i + extra, npo,
   2.144 +					  meta->frag.page,
   2.145 +					  meta->frag.size,
   2.146 +					  meta->frag.page_offset);
   2.147  	}
   2.148  
   2.149  	/*
   2.150  	 * This must occur at the end to ensure that we don't trash
   2.151  	 * skb_shinfo until we're done.
   2.152  	 */
   2.153 -	head_meta->id = netbk_gop_frag(netif,
   2.154 +	head_meta->id = netbk_gop_frag(netif, head_meta, 0, npo,
   2.155  				       virt_to_page(skb->data),
   2.156 -				       0,
   2.157 -				       npo);
   2.158 +				       skb_headlen(skb),
   2.159 +				       offset_in_page(skb->data));
   2.160 +
   2.161  	netif->rx.req_cons += nr_frags + extra;
   2.162  }
   2.163  
   2.164 @@ -430,32 +460,43 @@ static inline void netbk_free_pages(int 
   2.165     used to set up the operations on the top of
   2.166     netrx_pending_operations, which have since been done.  Check that
   2.167     they didn't give any errors and advance over them. */
   2.168 -static int netbk_check_gop(int nr_frags, domid_t domid, int count,
   2.169 +static int netbk_check_gop(int nr_frags, domid_t domid,
   2.170  			   struct netrx_pending_operations *npo)
   2.171  {
   2.172  	multicall_entry_t *mcl;
   2.173  	gnttab_transfer_t *gop;
   2.174 +	gnttab_copy_t     *copy_op;
   2.175  	int status = NETIF_RSP_OKAY;
   2.176  	int i;
   2.177  
   2.178  	for (i = 0; i <= nr_frags; i++) {
   2.179 -		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
   2.180 -			mcl = npo->mcl + npo->mcl_cons++;
   2.181 -			/* The update_va_mapping() must not fail. */
   2.182 -			BUG_ON(mcl->result != 0);
   2.183 -		}
   2.184 +		if (npo->meta[npo->meta_cons + i].copy) {
   2.185 +			copy_op = npo->copy + npo->copy_cons++;
   2.186 +			if (copy_op->status != GNTST_okay) {
    2.187 +				DPRINTK("Bad status %d from copy to DOM%u\n",
    2.188 +					copy_op->status, domid);
   2.189 +				status = NETIF_RSP_ERROR;
   2.190 +			}
   2.191 +		} else {
   2.192 +			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
   2.193 +				mcl = npo->mcl + npo->mcl_cons++;
   2.194 +				/* The update_va_mapping() must not fail. */
   2.195 +				BUG_ON(mcl->result != 0);
   2.196 +			}
   2.197  
   2.198 -		gop = npo->trans + npo->trans_cons++;
   2.199 -		/* Check the reassignment error code. */
   2.200 -		if (gop->status != 0) {
   2.201 -			DPRINTK("Bad status %d from grant transfer to DOM%u\n",
   2.202 -				gop->status, domid);
   2.203 -			/*
   2.204 -			 * Page no longer belongs to us unless GNTST_bad_page,
   2.205 -			 * but that should be a fatal error anyway.
   2.206 -			 */
   2.207 -			BUG_ON(gop->status == GNTST_bad_page);
   2.208 -			status = NETIF_RSP_ERROR;
   2.209 +			gop = npo->trans + npo->trans_cons++;
   2.210 +			/* Check the reassignment error code. */
   2.211 +			if (gop->status != 0) {
   2.212 +				DPRINTK("Bad status %d from grant transfer to DOM%u\n",
   2.213 +					gop->status, domid);
   2.214 +				/*
   2.215 +				 * Page no longer belongs to us unless
   2.216 +				 * GNTST_bad_page, but that should be
   2.217 +				 * a fatal error anyway.
   2.218 +				 */
   2.219 +				BUG_ON(gop->status == GNTST_bad_page);
   2.220 +				status = NETIF_RSP_ERROR;
   2.221 +			}
   2.222  		}
   2.223  	}
   2.224  
   2.225 @@ -466,12 +507,17 @@ static void netbk_add_frag_responses(net
   2.226  				     struct netbk_rx_meta *meta, int nr_frags)
   2.227  {
   2.228  	int i;
   2.229 +	unsigned long offset;
   2.230  
   2.231  	for (i = 0; i < nr_frags; i++) {
   2.232  		int id = meta[i].id;
   2.233  		int flags = (i == nr_frags - 1) ? 0 : NETRXF_more_data;
   2.234  
   2.235 -		make_rx_response(netif, id, status, meta[i].frag.page_offset,
   2.236 +		if (meta[i].copy)
   2.237 +			offset = 0;
   2.238 +		else
   2.239 +			offset = meta[i].frag.page_offset;
   2.240 +		make_rx_response(netif, id, status, offset,
   2.241  				 meta[i].frag.size, flags);
   2.242  	}
   2.243  }
   2.244 @@ -482,7 +528,6 @@ static void net_rx_action(unsigned long 
   2.245  	s8 status;
   2.246  	u16 id, irq, flags;
   2.247  	netif_rx_response_t *resp;
   2.248 -	struct netif_extra_info *extra;
   2.249  	multicall_entry_t *mcl;
   2.250  	struct sk_buff_head rxq;
   2.251  	struct sk_buff *skb;
   2.252 @@ -490,6 +535,7 @@ static void net_rx_action(unsigned long 
   2.253  	int ret;
   2.254  	int nr_frags;
   2.255  	int count;
   2.256 +	unsigned long offset;
   2.257  
   2.258  	/*
   2.259  	 * Putting hundreds of bytes on the stack is considered rude.
   2.260 @@ -497,14 +543,16 @@ static void net_rx_action(unsigned long 
   2.261  	 */
   2.262  	static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3];
   2.263  	static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
   2.264 -	static gnttab_transfer_t grant_rx_op[NET_RX_RING_SIZE];
   2.265 +	static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE];
   2.266 +	static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE];
   2.267  	static unsigned char rx_notify[NR_IRQS];
   2.268  	static u16 notify_list[NET_RX_RING_SIZE];
   2.269  	static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
   2.270  
   2.271  	struct netrx_pending_operations npo = {
   2.272  		mmu: rx_mmu,
   2.273 -		trans: grant_rx_op,
   2.274 +		trans: grant_trans_op,
   2.275 +		copy: grant_copy_op,
   2.276  		mcl: rx_mcl,
   2.277  		meta: meta};
   2.278  
   2.279 @@ -538,12 +586,8 @@ static void net_rx_action(unsigned long 
   2.280  			break;
   2.281  	}
   2.282  
   2.283 -	if (!count)
   2.284 -		return;
   2.285 -
   2.286 -	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
   2.287 -		BUG_ON(npo.mcl_prod == 0);
   2.288 -
   2.289 +	if (npo.mcl_prod &&
   2.290 +	    !xen_feature(XENFEAT_auto_translated_physmap)) {
   2.291  		mcl = npo.mcl + npo.mcl_prod++;
   2.292  
   2.293  		BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
   2.294 @@ -551,36 +595,63 @@ static void net_rx_action(unsigned long 
   2.295  
   2.296  		mcl->op = __HYPERVISOR_mmu_update;
   2.297  		mcl->args[0] = (unsigned long)rx_mmu;
   2.298 -		mcl->args[1] = count;
   2.299 +		mcl->args[1] = npo.mmu_prod;
   2.300  		mcl->args[2] = 0;
   2.301  		mcl->args[3] = DOMID_SELF;
   2.302  	}
   2.303  
   2.304 -	mcl = npo.mcl + npo.mcl_prod++;
   2.305 -	mcl->op = __HYPERVISOR_grant_table_op;
   2.306 -	mcl->args[0] = GNTTABOP_transfer;
   2.307 -	mcl->args[1] = (unsigned long)grant_rx_op;
   2.308 -	mcl->args[2] = npo.trans_prod;
   2.309 +	if (npo.trans_prod) {
   2.310 +		mcl = npo.mcl + npo.mcl_prod++;
   2.311 +		mcl->op = __HYPERVISOR_grant_table_op;
   2.312 +		mcl->args[0] = GNTTABOP_transfer;
   2.313 +		mcl->args[1] = (unsigned long)grant_trans_op;
   2.314 +		mcl->args[2] = npo.trans_prod;
   2.315 +	}
   2.316 +
   2.317 +	if (npo.copy_prod) {
   2.318 +		mcl = npo.mcl + npo.mcl_prod++;
   2.319 +		mcl->op = __HYPERVISOR_grant_table_op;
   2.320 +		mcl->args[0] = GNTTABOP_copy;
   2.321 +		mcl->args[1] = (unsigned long)grant_copy_op;
   2.322 +		mcl->args[2] = npo.copy_prod;
   2.323 +	}
   2.324 +
   2.325 +	/* Nothing to do? */
   2.326 +	if (!npo.mcl_prod)
   2.327 +		return;
   2.328 +
   2.329 +	BUG_ON(npo.copy_prod > NET_RX_RING_SIZE);
   2.330 +	BUG_ON(npo.mmu_prod > NET_RX_RING_SIZE);
   2.331 +	BUG_ON(npo.trans_prod > NET_RX_RING_SIZE);
   2.332 +	BUG_ON(npo.mcl_prod > NET_RX_RING_SIZE+3);
   2.333 +	BUG_ON(npo.meta_prod > NET_RX_RING_SIZE);
   2.334  
   2.335  	ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
   2.336  	BUG_ON(ret != 0);
   2.337 -	BUG_ON(mcl->result != 0);
   2.338  
   2.339 -	count = 0;
   2.340  	while ((skb = __skb_dequeue(&rxq)) != NULL) {
   2.341  		nr_frags = *(int *)skb->cb;
   2.342  
   2.343 -		atomic_set(&(skb_shinfo(skb)->dataref), 1);
   2.344 -		skb_shinfo(skb)->nr_frags = 0;
   2.345 -		skb_shinfo(skb)->frag_list = NULL;
   2.346 +		netif = netdev_priv(skb->dev);
    2.347 +		/* We can't rely on skb_release_data() to release the
    2.348 +		   pages used by fragments for us, since it tries to
    2.349 +		   touch those pages directly.  In flipping mode the
    2.350 +		   pages have already been given away, so that doesn't
    2.351 +		   work; in copying mode we still own all of the pages,
    2.352 +		   so it is safe to let release_data deal with them. */
    2.353 +		/* (Freeing the fragments ourselves is safe, since we
    2.354 +		   copy non-linear skbs destined for flipping interfaces.) */
   2.355 +		if (!netif->copying_receiver) {
   2.356 +			atomic_set(&(skb_shinfo(skb)->dataref), 1);
   2.357 +			skb_shinfo(skb)->frag_list = NULL;
   2.358 +			skb_shinfo(skb)->nr_frags = 0;
   2.359 +			netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
   2.360 +		}
   2.361  
   2.362 -		netif = netdev_priv(skb->dev);
   2.363  		netif->stats.tx_bytes += skb->len;
   2.364  		netif->stats.tx_packets++;
   2.365  
   2.366 -		netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
   2.367 -		status = netbk_check_gop(nr_frags, netif->domid, count,
   2.368 -					 &npo);
   2.369 +		status = netbk_check_gop(nr_frags, netif->domid, &npo);
   2.370  
   2.371  		id = meta[npo.meta_cons].id;
   2.372  		flags = nr_frags ? NETRXF_more_data : 0;
   2.373 @@ -590,22 +661,20 @@ static void net_rx_action(unsigned long 
   2.374  		else if (skb->proto_data_valid) /* remote but checksummed? */
   2.375  			flags |= NETRXF_data_validated;
   2.376  
   2.377 -		resp = make_rx_response(netif, id, status,
   2.378 -					offset_in_page(skb->data),
   2.379 +		if (meta[npo.meta_cons].copy)
   2.380 +			offset = 0;
   2.381 +		else
   2.382 +			offset = offset_in_page(skb->data);
   2.383 +		resp = make_rx_response(netif, id, status, offset,
   2.384  					skb_headlen(skb), flags);
   2.385  
   2.386 -		extra = NULL;
   2.387 -
   2.388  		if (meta[npo.meta_cons].frag.size) {
   2.389  			struct netif_extra_info *gso =
   2.390  				(struct netif_extra_info *)
   2.391  				RING_GET_RESPONSE(&netif->rx,
   2.392  						  netif->rx.rsp_prod_pvt++);
   2.393  
   2.394 -			if (extra)
   2.395 -				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
   2.396 -			else
   2.397 -				resp->flags |= NETRXF_extra_info;
   2.398 +			resp->flags |= NETRXF_extra_info;
   2.399  
   2.400  			gso->u.gso.size = meta[npo.meta_cons].frag.size;
   2.401  			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
   2.402 @@ -614,7 +683,6 @@ static void net_rx_action(unsigned long 
   2.403  
   2.404  			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
   2.405  			gso->flags = 0;
   2.406 -			extra = gso;
   2.407  		}
   2.408  
   2.409  		netbk_add_frag_responses(netif, status,
   2.410 @@ -634,7 +702,6 @@ static void net_rx_action(unsigned long 
   2.411  
   2.412  		netif_put(netif);
   2.413  		dev_kfree_skb(skb);
   2.414 -
   2.415  		npo.meta_cons += nr_frags + 1;
   2.416  	}
   2.417  
   2.418 @@ -1253,6 +1320,12 @@ static void netif_page_release(struct pa
   2.419  	netif_idx_release(pending_idx);
   2.420  }
   2.421  
   2.422 +static void netif_rx_page_release(struct page *page)
   2.423 +{
   2.424 +	/* Ready for next use. */
   2.425 +	set_page_count(page, 1);
   2.426 +}
   2.427 +
   2.428  irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
   2.429  {
   2.430  	netif_t *netif = dev_id;
   2.431 @@ -1383,6 +1456,16 @@ static int __init netback_init(void)
   2.432  		SetPageForeign(page, netif_page_release);
   2.433  	}
   2.434  
   2.435 +	page = balloon_alloc_empty_page_range(NET_RX_RING_SIZE);
   2.436 +	BUG_ON(page == NULL);
   2.437 +	rx_mmap_area = pfn_to_kaddr(page_to_pfn(page));
   2.438 +
   2.439 +	for (i = 0; i < NET_RX_RING_SIZE; i++) {
   2.440 +		page = virt_to_page(rx_mmap_area + (i * PAGE_SIZE));
   2.441 +		set_page_count(page, 1);
   2.442 +		SetPageForeign(page, netif_rx_page_release);
   2.443 +	}
   2.444 +
   2.445  	pending_cons = 0;
   2.446  	pending_prod = MAX_PENDING_REQS;
   2.447  	for (i = 0; i < MAX_PENDING_REQS; i++)
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Wed Aug 16 14:27:30 2006 +0100
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Wed Aug 16 16:01:00 2006 +0100
     3.3 @@ -108,6 +108,12 @@ static int netback_probe(struct xenbus_d
     3.4  			goto abort_transaction;
     3.5  		}
     3.6  
     3.7 +		err = xenbus_printf(xbt, dev->nodename, "feature-rx-copy", "%d", 1);
     3.8 +		if (err) {
     3.9 +			message = "writing feature-rx-copy";
    3.10 +			goto abort_transaction;
    3.11 +		}
    3.12 +
    3.13  		err = xenbus_transaction_end(xbt, 0);
    3.14  	} while (err == -EAGAIN);
    3.15  
    3.16 @@ -349,7 +355,7 @@ static int connect_rings(struct backend_
    3.17  {
    3.18  	struct xenbus_device *dev = be->dev;
    3.19  	unsigned long tx_ring_ref, rx_ring_ref;
    3.20 -	unsigned int evtchn;
    3.21 +	unsigned int evtchn, copyall;
    3.22  	int err;
    3.23  	int val;
    3.24  
    3.25 @@ -366,6 +372,18 @@ static int connect_rings(struct backend_
    3.26  		return err;
    3.27  	}
    3.28  
    3.29 +	err = xenbus_scanf(XBT_NIL, dev->otherend, "copyall", "%u", &copyall);
    3.30 +	if (err == -ENOENT) {
    3.31 +		err = 0;
    3.32 +		copyall = 0;
    3.33 +	}
    3.34 +	if (err < 0) {
    3.35 +		xenbus_dev_fatal(dev, err, "reading %s/copyall",
    3.36 +				 dev->otherend);
    3.37 +		return err;
    3.38 +	}
    3.39 +	be->netif->copying_receiver = !!copyall;
    3.40 +
    3.41  	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-rx-notify", "%d",
    3.42  			 &val) < 0)
    3.43  		val = 0;
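For reference, the frontend half of this handshake is not part of this
changeset. A hedged sketch of what netfront would do (the node names
match the backend code above; the xbt/dev/message variables and the
surrounding transaction are assumptions):

	int feature_rx_copy;

	/* Does the backend offer the copying receive path? */
	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-rx-copy",
			 "%d", &feature_rx_copy) != 1)
		feature_rx_copy = 0;

	if (feature_rx_copy) {
		/* Ask the backend to copy rather than flip. */
		err = xenbus_printf(xbt, dev->nodename, "copyall", "%u", 1);
		if (err) {
			message = "writing copyall";
			goto abort_transaction;
		}
	}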
     4.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypervisor.h	Wed Aug 16 14:27:30 2006 +0100
     4.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypervisor.h	Wed Aug 16 16:01:00 2006 +0100
     4.3 @@ -199,6 +199,16 @@ MULTI_update_va_mapping(
     4.4  }
     4.5  
     4.6  static inline void
     4.7 +MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd,
     4.8 +		     void *uop, unsigned int count)
     4.9 +{
    4.10 +    mcl->op = __HYPERVISOR_grant_table_op;
    4.11 +    mcl->args[0] = cmd;
    4.12 +    mcl->args[1] = (unsigned long)uop;
    4.13 +    mcl->args[2] = count;
    4.14 +}
    4.15 +
    4.16 +static inline void
    4.17  MULTI_update_va_mapping_otherdomain(
    4.18      multicall_entry_t *mcl, unsigned long va,
    4.19      pte_t new_val, unsigned long flags, domid_t domid)
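Note that the netback.c hunks above fill the grant-table multicall
arguments by hand rather than calling the MULTI_grant_table_op() helper
this file gains. A sketch of the equivalent usage in net_rx_action(),
under the patch's own names:

	if (npo.copy_prod) {
		mcl = npo.mcl + npo.mcl_prod++;
		MULTI_grant_table_op(mcl, GNTTABOP_copy,
				     grant_copy_op, npo.copy_prod);
	}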