direct-io.hg
changeset 10875:637fa5352fad
[NET] back: Transmit TSO packets if supported
This patch adds TSO transmission support to the backend.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Disabled for now in the domU->dom0 direction.
Signed-off-by: Keir Fraser <keir@xensource.com>
This patch adds TSO transmission support to the backend.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Disabled for now in the domU->dom0 direction.
Signed-off-by: Keir Fraser <keir@xensource.com>
author | kfraser@localhost.localdomain |
---|---|
date | Mon Jul 31 17:49:36 2006 +0100 (2006-07-31) |
parents | 485616ab73e3 |
children | 6d44ab88d941 |
files | linux-2.6-xen-sparse/drivers/xen/netback/interface.c linux-2.6-xen-sparse/drivers/xen/netback/netback.c linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c |
line diff
1.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c Mon Jul 31 17:45:22 2006 +0100 1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c Mon Jul 31 17:49:36 2006 +0100 1.3 @@ -84,12 +84,26 @@ static int netbk_set_sg(struct net_devic 1.4 return ethtool_op_set_sg(dev, data); 1.5 } 1.6 1.7 +static int netbk_set_tso(struct net_device *dev, u32 data) 1.8 +{ 1.9 + if (data) { 1.10 + netif_t *netif = netdev_priv(dev); 1.11 + 1.12 + if (!(netif->features & NETIF_F_TSO)) 1.13 + return -ENOSYS; 1.14 + } 1.15 + 1.16 + return ethtool_op_set_tso(dev, data); 1.17 +} 1.18 + 1.19 static struct ethtool_ops network_ethtool_ops = 1.20 { 1.21 .get_tx_csum = ethtool_op_get_tx_csum, 1.22 .set_tx_csum = ethtool_op_set_tx_csum, 1.23 .get_sg = ethtool_op_get_sg, 1.24 .set_sg = netbk_set_sg, 1.25 + .get_tso = ethtool_op_get_tso, 1.26 + .set_tso = netbk_set_tso, 1.27 .get_link = ethtool_op_get_link, 1.28 }; 1.29
2.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Mon Jul 31 17:45:22 2006 +0100 2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Mon Jul 31 17:49:36 2006 +0100 2.3 @@ -50,12 +50,12 @@ static void netif_page_release(struct pa 2.4 static void make_tx_response(netif_t *netif, 2.5 netif_tx_request_t *txp, 2.6 s8 st); 2.7 -static int make_rx_response(netif_t *netif, 2.8 - u16 id, 2.9 - s8 st, 2.10 - u16 offset, 2.11 - u16 size, 2.12 - u16 flags); 2.13 +static netif_rx_response_t *make_rx_response(netif_t *netif, 2.14 + u16 id, 2.15 + s8 st, 2.16 + u16 offset, 2.17 + u16 size, 2.18 + u16 flags); 2.19 2.20 static void net_tx_action(unsigned long unused); 2.21 static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0); 2.22 @@ -225,9 +225,9 @@ static inline int netbk_queue_full(netif 2.23 { 2.24 RING_IDX peek = netif->rx_req_cons_peek; 2.25 2.26 - return ((netif->rx.sring->req_prod - peek) <= MAX_SKB_FRAGS) || 2.27 + return ((netif->rx.sring->req_prod - peek) <= (MAX_SKB_FRAGS + 1)) || 2.28 ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) <= 2.29 - MAX_SKB_FRAGS); 2.30 + (MAX_SKB_FRAGS + 1)); 2.31 } 2.32 2.33 int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev) 2.34 @@ -265,12 +265,13 @@ int netif_be_start_xmit(struct sk_buff * 2.35 skb = nskb; 2.36 } 2.37 2.38 - netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1; 2.39 + netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 + 2.40 + !!skb_shinfo(skb)->gso_size; 2.41 netif_get(netif); 2.42 2.43 if (netbk_can_queue(dev) && netbk_queue_full(netif)) { 2.44 netif->rx.sring->req_event = netif->rx_req_cons_peek + 2.45 - MAX_SKB_FRAGS + 1; 2.46 + MAX_SKB_FRAGS + 2; 2.47 mb(); /* request notification /then/ check & stop the queue */ 2.48 if (netbk_queue_full(netif)) 2.49 netif_stop_queue(dev); 2.50 @@ -347,11 +348,16 @@ static void netbk_gop_skb(struct sk_buff 2.51 netif_t *netif = netdev_priv(skb->dev); 2.52 int nr_frags = skb_shinfo(skb)->nr_frags; 2.53 int i; 2.54 + int extra; 
2.55 + 2.56 + meta[count].frag.page_offset = skb_shinfo(skb)->gso_type; 2.57 + meta[count].frag.size = skb_shinfo(skb)->gso_size; 2.58 + extra = !!meta[count].frag.size + 1; 2.59 2.60 for (i = 0; i < nr_frags; i++) { 2.61 meta[++count].frag = skb_shinfo(skb)->frags[i]; 2.62 meta[count].id = netbk_gop_frag(netif, meta[count].frag.page, 2.63 - count, i + 1); 2.64 + count, i + extra); 2.65 } 2.66 2.67 /* 2.68 @@ -361,7 +367,7 @@ static void netbk_gop_skb(struct sk_buff 2.69 meta[count - nr_frags].id = netbk_gop_frag(netif, 2.70 virt_to_page(skb->data), 2.71 count - nr_frags, 0); 2.72 - netif->rx.req_cons += nr_frags + 1; 2.73 + netif->rx.req_cons += nr_frags + extra; 2.74 } 2.75 2.76 static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta) 2.77 @@ -422,6 +428,8 @@ static void net_rx_action(unsigned long 2.78 netif_t *netif = NULL; 2.79 s8 status; 2.80 u16 id, irq, flags; 2.81 + netif_rx_response_t *resp; 2.82 + struct netif_extra_info *extra; 2.83 multicall_entry_t *mcl; 2.84 struct sk_buff_head rxq; 2.85 struct sk_buff *skb; 2.86 @@ -511,8 +519,33 @@ static void net_rx_action(unsigned long 2.87 else if (skb->proto_data_valid) /* remote but checksummed? 
*/ 2.88 flags |= NETRXF_data_validated; 2.89 2.90 - make_rx_response(netif, id, status, offset_in_page(skb->data), 2.91 - skb_headlen(skb), flags); 2.92 + resp = make_rx_response(netif, id, status, 2.93 + offset_in_page(skb->data), 2.94 + skb_headlen(skb), flags); 2.95 + 2.96 + extra = NULL; 2.97 + 2.98 + if (meta[count].frag.size) { 2.99 + struct netif_extra_info *gso = 2.100 + (struct netif_extra_info *) 2.101 + RING_GET_RESPONSE(&netif->rx, 2.102 + netif->rx.rsp_prod_pvt++); 2.103 + 2.104 + if (extra) 2.105 + extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; 2.106 + else 2.107 + resp->flags |= NETRXF_extra_info; 2.108 + 2.109 + gso->u.gso.size = meta[count].frag.size; 2.110 + gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; 2.111 + gso->u.gso.pad = 0; 2.112 + gso->u.gso.features = 0; 2.113 + 2.114 + gso->type = XEN_NETIF_EXTRA_TYPE_GSO; 2.115 + gso->flags = 0; 2.116 + extra = gso; 2.117 + } 2.118 + 2.119 netbk_add_frag_responses(netif, status, meta + count + 1, 2.120 nr_frags); 2.121 2.122 @@ -1190,12 +1223,12 @@ static void make_tx_response(netif_t *ne 2.123 #endif 2.124 } 2.125 2.126 -static int make_rx_response(netif_t *netif, 2.127 - u16 id, 2.128 - s8 st, 2.129 - u16 offset, 2.130 - u16 size, 2.131 - u16 flags) 2.132 +static netif_rx_response_t *make_rx_response(netif_t *netif, 2.133 + u16 id, 2.134 + s8 st, 2.135 + u16 offset, 2.136 + u16 size, 2.137 + u16 flags) 2.138 { 2.139 RING_IDX i = netif->rx.rsp_prod_pvt; 2.140 netif_rx_response_t *resp; 2.141 @@ -1210,7 +1243,7 @@ static int make_rx_response(netif_t *net 2.142 2.143 netif->rx.rsp_prod_pvt = ++i; 2.144 2.145 - return 0; 2.146 + return resp; 2.147 } 2.148 2.149 #ifdef NETBE_DEBUG_INTERRUPT
3.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c Mon Jul 31 17:45:22 2006 +0100 3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c Mon Jul 31 17:49:36 2006 +0100 3.3 @@ -384,6 +384,16 @@ static int connect_rings(struct backend_ 3.4 be->netif->dev->features |= NETIF_F_SG; 3.5 } 3.6 3.7 +#if 0 /* KAF: After the protocol is finalised. */ 3.8 + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", "%d", 3.9 + &val) < 0) 3.10 + val = 0; 3.11 + if (val) { 3.12 + be->netif->features |= NETIF_F_TSO; 3.13 + be->netif->dev->features |= NETIF_F_TSO; 3.14 + } 3.15 +#endif 3.16 + 3.17 /* Map the shared frame, irq etc. */ 3.18 err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn); 3.19 if (err) {