ia64/xen-unstable

changeset 10881:5f5a2f282032

[NET] front: Add SG support

This patch adds scatter-gather (SG) support to the frontend. It also
advertises the capability through xenbus ("feature-sg") so that the
backend can detect it and send SG requests only when the frontend
supports them.
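
For context, the backend side of this negotiation can be a single
transaction-less read of the frontend's store directory. A minimal
sketch, not part of this patch (the helper name is hypothetical; it
assumes the backend's xenbus_device in dev and the XBT_NIL
xenbus_scanf() interface of this tree):

	/*
	 * Hypothetical backend-side probe, not part of this patch: read the
	 * frontend's "feature-sg" flag.  dev->otherend is the frontend's
	 * xenstore directory; a missing key means an old, non-SG frontend.
	 */
	static int frontend_supports_sg(struct xenbus_device *dev)
	{
		int sg = 0;

		if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
				 "%d", &sg) < 1)
			sg = 0;

		return !!sg;
	}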

SG support is required to handle skbs larger than one page, which in
turn is needed for either jumbo MTU or TSO. One of these is required
to bring local networking performance up to an acceptable level.
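
With SG negotiated, the frontend can in turn tell the network stack
that paged (multi-fragment) skbs are acceptable by setting the matching
device feature bits. An illustrative sketch, not part of this patch
(hypothetical helper; the stack only builds SG/TSO skbs when a
checksum-offload bit is set as well):

	/*
	 * Illustrative only, not part of this patch: advertise SG (and,
	 * once TSO support follows, TSO) to the network stack.  A
	 * checksum-offload bit must accompany them or the stack will not
	 * generate SG/TSO traffic.
	 */
	static void xennet_enable_sg(struct net_device *netdev)
	{
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	#ifdef NETIF_F_TSO
		netdev->features |= NETIF_F_TSO;
	#endif
	}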

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
author kfraser@localhost.localdomain
date Mon Jul 31 17:35:43 2006 +0100 (2006-07-31)
parents 7bb2e0be9cc2
children 9f29252c23b6
files linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Mon Jul 31 17:29:00 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Mon Jul 31 17:35:43 2006 +0100
     1.3 @@ -46,11 +46,11 @@
     1.4  #include <linux/ethtool.h>
     1.5  #include <linux/in.h>
     1.6  #include <linux/if_ether.h>
     1.7 +#include <linux/io.h>
     1.8  #include <net/sock.h>
     1.9  #include <net/pkt_sched.h>
    1.10  #include <net/arp.h>
    1.11  #include <net/route.h>
    1.12 -#include <asm/io.h>
    1.13  #include <asm/uaccess.h>
    1.14  #include <xen/evtchn.h>
    1.15  #include <xen/xenbus.h>
    1.16 @@ -62,18 +62,13 @@
    1.17  #include <xen/interface/grant_table.h>
    1.18  #include <xen/gnttab.h>
    1.19  
    1.20 +#define RX_COPY_THRESHOLD 256
    1.21 +
    1.22  #define GRANT_INVALID_REF	0
    1.23  
    1.24  #define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
    1.25  #define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)
    1.26  
    1.27 -static inline void init_skb_shinfo(struct sk_buff *skb)
    1.28 -{
    1.29 -	atomic_set(&(skb_shinfo(skb)->dataref), 1);
    1.30 -	skb_shinfo(skb)->nr_frags = 0;
    1.31 -	skb_shinfo(skb)->frag_list = NULL;
    1.32 -}
    1.33 -
    1.34  struct netfront_info {
    1.35  	struct list_head list;
    1.36  	struct net_device *netdev;
    1.37 @@ -332,6 +327,12 @@ again:
    1.38  		goto abort_transaction;
    1.39  	}
    1.40  
    1.41 +	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
    1.42 +	if (err) {
    1.43 +		message = "writing feature-sg";
    1.44 +		goto abort_transaction;
    1.45 +	}
    1.46 +
    1.47  	err = xenbus_transaction_end(xbt, 0);
    1.48  	if (err) {
    1.49  		if (err == -EAGAIN)
    1.50 @@ -575,10 +576,13 @@ static void network_alloc_rx_buffers(str
    1.51  	unsigned short id;
    1.52  	struct netfront_info *np = netdev_priv(dev);
    1.53  	struct sk_buff *skb;
    1.54 +	struct page *page;
    1.55  	int i, batch_target, notify;
    1.56  	RING_IDX req_prod = np->rx.req_prod_pvt;
    1.57  	struct xen_memory_reservation reservation;
    1.58  	grant_ref_t ref;
     1.59 +	unsigned long pfn;
     1.60 +	void *vaddr;
    1.61  
    1.62  	if (unlikely(!netif_carrier_ok(dev)))
    1.63  		return;
    1.64 @@ -591,15 +595,16 @@ static void network_alloc_rx_buffers(str
    1.65  	 */
    1.66  	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
    1.67  	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
    1.68 -		/*
    1.69 -		 * Subtract dev_alloc_skb headroom (16 bytes) and shared info
    1.70 -		 * tailroom then round down to SKB_DATA_ALIGN boundary.
    1.71 -		 */
    1.72 -		skb = __dev_alloc_skb(
    1.73 -			((PAGE_SIZE - sizeof(struct skb_shared_info)) &
    1.74 -			 (-SKB_DATA_ALIGN(1))) - 16,
    1.75 -			GFP_ATOMIC|__GFP_NOWARN);
    1.76 -		if (skb == NULL) {
    1.77 +		/* Allocate an skb and a page. */
    1.78 +		skb = __dev_alloc_skb(RX_COPY_THRESHOLD,
    1.79 +				      GFP_ATOMIC | __GFP_NOWARN);
    1.80 +		if (unlikely(!skb))
    1.81 +			goto no_skb;
    1.82 +
    1.83 +		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
    1.84 +		if (!page) {
    1.85 +			kfree_skb(skb);
    1.86 +no_skb:
    1.87  			/* Any skbuffs queued for refill? Force them out. */
    1.88  			if (i != 0)
    1.89  				goto refill;
    1.90 @@ -608,6 +613,9 @@ static void network_alloc_rx_buffers(str
    1.91  				  jiffies + (HZ/10));
    1.92  			break;
    1.93  		}
    1.94 +
    1.95 +		skb_shinfo(skb)->frags[0].page = page;
    1.96 +		skb_shinfo(skb)->nr_frags = 1;
    1.97  		__skb_queue_tail(&np->rx_batch, skb);
    1.98  	}
    1.99  
   1.100 @@ -639,18 +647,20 @@ static void network_alloc_rx_buffers(str
   1.101  		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
   1.102  		BUG_ON((signed short)ref < 0);
   1.103  		np->grant_rx_ref[id] = ref;
   1.104 +
   1.105 +		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
   1.106 +		vaddr = page_address(skb_shinfo(skb)->frags[0].page);
   1.107 +
   1.108  		gnttab_grant_foreign_transfer_ref(ref,
   1.109 -						  np->xbdev->otherend_id,
   1.110 -						  __pa(skb->head)>>PAGE_SHIFT);
   1.111 +						  np->xbdev->otherend_id, pfn);
   1.112  		RING_GET_REQUEST(&np->rx, req_prod + i)->gref = ref;
   1.113 -		np->rx_pfn_array[i] = virt_to_mfn(skb->head);
   1.114 +		np->rx_pfn_array[i] = pfn_to_mfn(pfn);
   1.115  
   1.116  		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
   1.117  			/* Remove this page before passing back to Xen. */
   1.118 -			set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
   1.119 -					    INVALID_P2M_ENTRY);
   1.120 +			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
   1.121  			MULTI_update_va_mapping(np->rx_mcl+i,
   1.122 -						(unsigned long)skb->head,
   1.123 +						(unsigned long)vaddr,
   1.124  						__pte(0), 0);
   1.125  		}
   1.126  	}
   1.127 @@ -889,19 +899,145 @@ static void xennet_move_rx_slot(struct n
   1.128  	np->rx.req_prod_pvt++;
   1.129  }
   1.130  
   1.131 +static int xennet_get_responses(struct netfront_info *np,
   1.132 +				struct netif_rx_response *rx, RING_IDX rp,
   1.133 +				struct sk_buff_head *list, int count)
   1.134 +{
   1.135 +	struct mmu_update *mmu = np->rx_mmu + count;
   1.136 +	struct multicall_entry *mcl = np->rx_mcl + count;
   1.137 +	RING_IDX cons = np->rx.rsp_cons;
   1.138 +	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
   1.139 +	grant_ref_t ref = xennet_get_rx_ref(np, cons);
   1.140 +	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
   1.141 +	int frags = 1;
   1.142 +	int err = 0;
   1.143 +
   1.144 +	for (;;) {
   1.145 +		unsigned long mfn;
   1.146 +
   1.147 +		if (unlikely(rx->status < 0 ||
   1.148 +			     rx->offset + rx->status > PAGE_SIZE)) {
   1.149 +			if (net_ratelimit())
   1.150 +				WPRINTK("rx->offset: %x, size: %u\n",
   1.151 +					rx->offset, rx->status);
   1.152 +			err = -EINVAL;
   1.153 +		}
   1.154 +
   1.155 +		/*
   1.156 +		 * This definitely indicates a bug, either in this driver or in
   1.157 +		 * the backend driver. In future this should flag the bad
    1.158 +	 * situation to the system controller to reboot the backend.
   1.159 +		 */
   1.160 +		if (ref == GRANT_INVALID_REF) {
   1.161 +			WPRINTK("Bad rx response id %d.\n", rx->id);
   1.162 +			err = -EINVAL;
   1.163 +			goto next;
   1.164 +		}
   1.165 +
   1.166 +		/* Memory pressure, insufficient buffer headroom, ... */
   1.167 +		if ((mfn = gnttab_end_foreign_transfer_ref(ref)) == 0) {
   1.168 +			if (net_ratelimit())
   1.169 +				WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
   1.170 +					rx->id, rx->status);
   1.171 +			xennet_move_rx_slot(np, skb, ref);
   1.172 +			err = -ENOMEM;
   1.173 +			goto next;
   1.174 +		}
   1.175 +
   1.176 +		gnttab_release_grant_reference(&np->gref_rx_head, ref);
   1.177 +
   1.178 +		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
   1.179 +			/* Remap the page. */
   1.180 +			struct page *page = skb_shinfo(skb)->frags[0].page;
   1.181 +			unsigned long pfn = page_to_pfn(page);
   1.182 +			void *vaddr = page_address(page);
   1.183 +
   1.184 +			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
   1.185 +						pfn_pte_ma(mfn, PAGE_KERNEL),
   1.186 +						0);
   1.187 +			mcl++;
   1.188 +			mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
   1.189 +				| MMU_MACHPHYS_UPDATE;
   1.190 +			mmu->val = pfn;
   1.191 +			mmu++;
   1.192 +
   1.193 +			set_phys_to_machine(pfn, mfn);
   1.194 +		}
   1.195 +
   1.196 +		__skb_queue_tail(list, skb);
   1.197 +
   1.198 +next:
   1.199 +		if (!(rx->flags & NETRXF_more_data))
   1.200 +			break;
   1.201 +
   1.202 +		if (cons + frags == rp) {
   1.203 +			if (net_ratelimit())
   1.204 +				WPRINTK("Need more frags\n");
   1.205 +			err = -ENOENT;
   1.206 +			break;
   1.207 +		}
   1.208 +
   1.209 +		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
   1.210 +		skb = xennet_get_rx_skb(np, cons + frags);
   1.211 +		ref = xennet_get_rx_ref(np, cons + frags);
   1.212 +		frags++;
   1.213 +	}
   1.214 +
   1.215 +	if (unlikely(frags > max)) {
   1.216 +		if (net_ratelimit())
   1.217 +			WPRINTK("Too many frags\n");
   1.218 +		err = -E2BIG;
   1.219 +	}
   1.220 +
   1.221 +	return err;
   1.222 +}
   1.223 +
   1.224 +static RING_IDX xennet_fill_frags(struct netfront_info *np,
   1.225 +				  struct sk_buff *skb,
   1.226 +				  struct sk_buff_head *list)
   1.227 +{
   1.228 +	struct skb_shared_info *shinfo = skb_shinfo(skb);
   1.229 +	int nr_frags = shinfo->nr_frags;
   1.230 +	RING_IDX cons = np->rx.rsp_cons;
   1.231 +	skb_frag_t *frag = shinfo->frags + nr_frags;
   1.232 +	struct sk_buff *nskb;
   1.233 +
   1.234 +	while ((nskb = __skb_dequeue(list))) {
   1.235 +		struct netif_rx_response *rx =
   1.236 +			RING_GET_RESPONSE(&np->rx, ++cons);
   1.237 +
   1.238 +		frag->page = skb_shinfo(nskb)->frags[0].page;
   1.239 +		frag->page_offset = rx->offset;
   1.240 +		frag->size = rx->status;
   1.241 +
   1.242 +		skb->data_len += rx->status;
   1.243 +
   1.244 +		skb_shinfo(nskb)->nr_frags = 0;
   1.245 +		kfree_skb(nskb);
   1.246 +
   1.247 +		frag++;
   1.248 +		nr_frags++;
   1.249 +	}
   1.250 +
   1.251 +	shinfo->nr_frags = nr_frags;
   1.252 +	return cons;
   1.253 +}
   1.254 +
   1.255  static int netif_poll(struct net_device *dev, int *pbudget)
   1.256  {
   1.257  	struct netfront_info *np = netdev_priv(dev);
   1.258 -	struct sk_buff *skb, *nskb;
   1.259 +	struct sk_buff *skb;
   1.260  	struct netif_rx_response *rx;
   1.261  	RING_IDX i, rp;
   1.262 -	struct mmu_update *mmu = np->rx_mmu;
   1.263 -	struct multicall_entry *mcl = np->rx_mcl;
   1.264 +	struct multicall_entry *mcl;
   1.265  	int work_done, budget, more_to_do = 1;
   1.266  	struct sk_buff_head rxq;
   1.267 +	struct sk_buff_head errq;
   1.268 +	struct sk_buff_head tmpq;
   1.269  	unsigned long flags;
   1.270 -	unsigned long mfn;
   1.271 -	grant_ref_t ref;
   1.272 +	unsigned int len;
   1.273 +	int pages_done;
   1.274 +	int err;
   1.275  
   1.276  	spin_lock(&np->rx_lock);
   1.277  
   1.278 @@ -911,47 +1047,54 @@ static int netif_poll(struct net_device 
   1.279  	}
   1.280  
   1.281  	skb_queue_head_init(&rxq);
   1.282 +	skb_queue_head_init(&errq);
   1.283 +	skb_queue_head_init(&tmpq);
   1.284  
   1.285  	if ((budget = *pbudget) > dev->quota)
   1.286  		budget = dev->quota;
   1.287  	rp = np->rx.sring->rsp_prod;
   1.288  	rmb(); /* Ensure we see queued responses up to 'rp'. */
   1.289  
   1.290 -	for (i = np->rx.rsp_cons, work_done = 0;
   1.291 +	for (i = np->rx.rsp_cons, work_done = 0, pages_done = 0;
   1.292  	     (i != rp) && (work_done < budget);
   1.293 -	     i++, work_done++) {
   1.294 +	     np->rx.rsp_cons = ++i, work_done++) {
   1.295  		rx = RING_GET_RESPONSE(&np->rx, i);
   1.296  
   1.297 -		skb = xennet_get_rx_skb(np, i);
   1.298 -		ref = xennet_get_rx_ref(np, i);
   1.299 +		err = xennet_get_responses(np, rx, rp, &tmpq, pages_done);
   1.300 +		pages_done += skb_queue_len(&tmpq);
   1.301  
   1.302 -		/*
   1.303 -		 * This definitely indicates a bug, either in this driver or in
   1.304 -		 * the backend driver. In future this should flag the bad
   1.305 -		 * situation to the system controller to reboot the backed.
   1.306 -		 */
   1.307 -		if (ref == GRANT_INVALID_REF) {
   1.308 -			WPRINTK("Bad rx response id %d.\n", rx->id);
   1.309 +		if (unlikely(err)) {
   1.310 +			i = np->rx.rsp_cons + skb_queue_len(&tmpq) - 1;
   1.311  			work_done--;
   1.312 +			while ((skb = __skb_dequeue(&tmpq)))
   1.313 +				__skb_queue_tail(&errq, skb);
   1.314 +			np->stats.rx_errors++;
   1.315  			continue;
   1.316  		}
   1.317  
   1.318 -		/* Memory pressure, insufficient buffer headroom, ... */
   1.319 -		if ((mfn = gnttab_end_foreign_transfer_ref(ref)) == 0) {
   1.320 -			if (net_ratelimit())
   1.321 -				WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
   1.322 -					rx->id, rx->status);
   1.323 -			xennet_move_rx_slot(np, skb, ref);
   1.324 -			work_done--;
   1.325 -			continue;
   1.326 +		skb = __skb_dequeue(&tmpq);
   1.327 +
   1.328 +		skb->nh.raw = (void *)skb_shinfo(skb)->frags[0].page;
   1.329 +		skb->h.raw = skb->nh.raw + rx->offset;
   1.330 +
   1.331 +		len = rx->status;
   1.332 +		if (len > RX_COPY_THRESHOLD)
   1.333 +			len = RX_COPY_THRESHOLD;
   1.334 +		skb_put(skb, len);
   1.335 +
   1.336 +		if (rx->status > len) {
   1.337 +			skb_shinfo(skb)->frags[0].page_offset =
   1.338 +				rx->offset + len;
   1.339 +			skb_shinfo(skb)->frags[0].size = rx->status - len;
   1.340 +			skb->data_len = rx->status - len;
   1.341 +		} else {
   1.342 +			skb_shinfo(skb)->frags[0].page = NULL;
   1.343 +			skb_shinfo(skb)->nr_frags = 0;
   1.344  		}
   1.345  
   1.346 -		gnttab_release_grant_reference(&np->gref_rx_head, ref);
   1.347 -
   1.348 -		/* NB. We handle skb overflow later. */
   1.349 -		skb->data = skb->head + rx->offset;
   1.350 -		skb->len  = rx->status;
   1.351 -		skb->tail = skb->data + skb->len;
   1.352 +		i = xennet_fill_frags(np, skb, &tmpq);
   1.353 +		skb->truesize += skb->data_len;
   1.354 +		skb->len += skb->data_len;
   1.355  
   1.356  		/*
   1.357  		 * Old backends do not assert data_validated but we
   1.358 @@ -967,96 +1110,38 @@ static int netif_poll(struct net_device 
   1.359  		skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank);
   1.360  
   1.361  		np->stats.rx_packets++;
   1.362 -		np->stats.rx_bytes += rx->status;
   1.363 -
   1.364 -		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
   1.365 -			/* Remap the page. */
   1.366 -			MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
   1.367 -						pfn_pte_ma(mfn, PAGE_KERNEL),
   1.368 -						0);
   1.369 -			mcl++;
   1.370 -			mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
   1.371 -				| MMU_MACHPHYS_UPDATE;
   1.372 -			mmu->val = __pa(skb->head) >> PAGE_SHIFT;
   1.373 -			mmu++;
   1.374 -
   1.375 -			set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
   1.376 -					    mfn);
   1.377 -		}
   1.378 +		np->stats.rx_bytes += skb->len;
   1.379  
   1.380  		__skb_queue_tail(&rxq, skb);
   1.381  	}
   1.382  
   1.383  	/* Some pages are no longer absent... */
   1.384 -	balloon_update_driver_allowance(-work_done);
   1.385 +	balloon_update_driver_allowance(-pages_done);
   1.386  
   1.387  	/* Do all the remapping work, and M2P updates, in one big hypercall. */
   1.388 -	if (likely((mcl - np->rx_mcl) != 0)) {
   1.389 +	if (likely(pages_done)) {
   1.390 +		mcl = np->rx_mcl + pages_done;
   1.391  		mcl->op = __HYPERVISOR_mmu_update;
   1.392  		mcl->args[0] = (unsigned long)np->rx_mmu;
   1.393 -		mcl->args[1] = mmu - np->rx_mmu;
   1.394 +		mcl->args[1] = pages_done;
   1.395  		mcl->args[2] = 0;
   1.396  		mcl->args[3] = DOMID_SELF;
   1.397 -		mcl++;
   1.398 -		(void)HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
   1.399 +		(void)HYPERVISOR_multicall(np->rx_mcl, pages_done + 1);
   1.400  	}
   1.401  
   1.402 +	while ((skb = __skb_dequeue(&errq)))
   1.403 +		kfree_skb(skb);
   1.404 +
   1.405  	while ((skb = __skb_dequeue(&rxq)) != NULL) {
   1.406 -		if (skb->len > (dev->mtu + ETH_HLEN + 4)) {
   1.407 -			if (net_ratelimit())
   1.408 -				printk(KERN_INFO "Received packet too big for "
   1.409 -				       "MTU (%d > %d)\n",
   1.410 -				       skb->len - ETH_HLEN - 4, dev->mtu);
   1.411 -			skb->len  = 0;
   1.412 -			skb->tail = skb->data;
   1.413 -			init_skb_shinfo(skb);
   1.414 -			dev_kfree_skb(skb);
   1.415 -			continue;
   1.416 -		}
   1.417 +		struct page *page = (struct page *)skb->nh.raw;
   1.418 +		void *vaddr = page_address(page);
   1.419  
   1.420 -		/*
   1.421 -		 * Enough room in skbuff for the data we were passed? Also,
   1.422 -		 * Linux expects at least 16 bytes headroom in each rx buffer.
   1.423 -		 */
   1.424 -		if (unlikely(skb->tail > skb->end) ||
   1.425 -		    unlikely((skb->data - skb->head) < 16)) {
   1.426 -			if (net_ratelimit()) {
   1.427 -				if (skb->tail > skb->end)
   1.428 -					printk(KERN_INFO "Received packet "
   1.429 -					       "is %zd bytes beyond tail.\n",
   1.430 -					       skb->tail - skb->end);
   1.431 -				else
   1.432 -					printk(KERN_INFO "Received packet "
   1.433 -					       "is %zd bytes before head.\n",
   1.434 -					       16 - (skb->data - skb->head));
   1.435 -			}
   1.436 +		memcpy(skb->data, vaddr + (skb->h.raw - skb->nh.raw),
   1.437 +		       skb_headlen(skb));
   1.438  
   1.439 -			nskb = __dev_alloc_skb(skb->len + 2,
   1.440 -					       GFP_ATOMIC|__GFP_NOWARN);
   1.441 -			if (nskb != NULL) {
   1.442 -				skb_reserve(nskb, 2);
   1.443 -				skb_put(nskb, skb->len);
   1.444 -				memcpy(nskb->data, skb->data, skb->len);
   1.445 -				/* Copy any other fields we already set up. */
   1.446 -				nskb->dev = skb->dev;
   1.447 -				nskb->ip_summed = skb->ip_summed;
   1.448 -				nskb->proto_data_valid = skb->proto_data_valid;
   1.449 -				nskb->proto_csum_blank = skb->proto_csum_blank;
   1.450 -			}
   1.451 +		if (page != skb_shinfo(skb)->frags[0].page)
   1.452 +			__free_page(page);
   1.453  
   1.454 -			/* Reinitialise and then destroy the old skbuff. */
   1.455 -			skb->len  = 0;
   1.456 -			skb->tail = skb->data;
   1.457 -			init_skb_shinfo(skb);
   1.458 -			dev_kfree_skb(skb);
   1.459 -
   1.460 -			/* Switch old for new, if we copied the buffer. */
   1.461 -			if ((skb = nskb) == NULL)
   1.462 -				continue;
   1.463 -		}
   1.464 -
   1.465 -		/* Set the shinfo area, which is hidden behind the data. */
   1.466 -		init_skb_shinfo(skb);
   1.467  		/* Ethernet work: Delayed to here as it peeks the header. */
   1.468  		skb->protocol = eth_type_trans(skb, dev);
   1.469  
   1.470 @@ -1065,8 +1150,6 @@ static int netif_poll(struct net_device 
   1.471  		dev->last_rx = jiffies;
   1.472  	}
   1.473  
   1.474 -	np->rx.rsp_cons = i;
   1.475 -
   1.476  	/* If we get a callback with very few responses, reduce fill target. */
   1.477  	/* NB. Note exponential increase, linear decrease. */
   1.478  	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
   1.479 @@ -1210,7 +1293,7 @@ static void network_connect(struct net_d
   1.480  
   1.481  		gnttab_grant_foreign_transfer_ref(
   1.482  			ref, np->xbdev->otherend_id,
   1.483 -			__pa(skb->data) >> PAGE_SHIFT);
   1.484 +			page_to_pfn(skb_shinfo(np->rx_skbs[i])->frags->page));
   1.485  
   1.486  		RING_GET_REQUEST(&np->rx, requeue_idx)->gref = ref;
   1.487  		RING_GET_REQUEST(&np->rx, requeue_idx)->id   = requeue_idx;