ia64/xen-unstable
linux-2.6-xen-sparse/drivers/xen/netback/interface.c @ 13991:c4ed5b740a8d

linux: remove {lock,unlock}_vm_area(). Instead use vmalloc_sync_all()
in alloc_vm_area().

Signed-off-by: Keir Fraser <keir@xensource.com>

author    kfraser@localhost.localdomain
date      Fri Feb 16 11:24:10 2007 +0000
parents   d275951acf10
children  42b29f084c31
/******************************************************************************
 * arch/xen/drivers/netif/backend/interface.c
 *
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>

/*
 * Module parameter 'queue_length':
 *
 * Enables queuing in the network stack when a client has run out of receive
 * descriptors. Although this feature can improve receive bandwidth by avoiding
 * packet loss, it can also result in packets sitting in the 'tx_queue' for
 * unbounded time. This is bad if those packets hold onto foreign resources.
 * For example, consider a packet that holds onto resources belonging to the
 * guest for which it is queued (e.g., packet received on vif1.0, destined for
 * vif1.1 which is not activated in the guest): in this situation the guest
 * will never be destroyed, unless vif1.1 is taken down. To avoid this, we
 * run a timer (tx_queue_timeout) to drain the queue when the interface is
 * blocked.
 */
static unsigned long netbk_queue_length = 32;
module_param_named(queue_length, netbk_queue_length, ulong, 0);
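
/*
 * Bring a backend interface up or down: unmask/mask its event-channel
 * IRQ and schedule/deschedule any pending backend work for it.
 */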
static void __netif_up(netif_t *netif)
{
	enable_irq(netif->irq);
	netif_schedule_work(netif);
}

static void __netif_down(netif_t *netif)
{
	disable_irq(netif->irq);
	netif_deschedule_work(netif);
}

static int net_open(struct net_device *dev)
{
	netif_t *netif = netdev_priv(dev);
	if (netif_carrier_ok(dev))
		__netif_up(netif);
	return 0;
}

static int net_close(struct net_device *dev)
{
	netif_t *netif = netdev_priv(dev);
	if (netif_carrier_ok(dev))
		__netif_down(netif);
	return 0;
}

static int netbk_change_mtu(struct net_device *dev, int mtu)
{
	int max = netbk_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static int netbk_set_sg(struct net_device *dev, u32 data)
{
	if (data) {
		netif_t *netif = netdev_priv(dev);

		if (!(netif->features & NETIF_F_SG))
			return -ENOSYS;
	}

	return ethtool_op_set_sg(dev, data);
}

static int netbk_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		netif_t *netif = netdev_priv(dev);

		if (!(netif->features & NETIF_F_TSO))
			return -ENOSYS;
	}

	return ethtool_op_set_tso(dev, data);
}

static struct ethtool_ops network_ethtool_ops =
{
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = netbk_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = netbk_set_tso,
	.get_link = ethtool_op_get_link,
};
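
/*
 * Allocate and register the net device for (domid, handle), named
 * "vif<domid>.<handle>". The carrier is left off until the frontend
 * connects via netif_map().
 */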
netif_t *netif_alloc(domid_t domid, unsigned int handle)
{
	int err = 0;
	struct net_device *dev;
	netif_t *netif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
	if (dev == NULL) {
		DPRINTK("Could not create netif: out of memory\n");
		return ERR_PTR(-ENOMEM);
	}

	netif_carrier_off(dev);

	netif = netdev_priv(dev);
	memset(netif, 0, sizeof(*netif));
	netif->domid = domid;
	netif->handle = handle;
	atomic_set(&netif->refcnt, 1);
	init_waitqueue_head(&netif->waiting_to_free);
	netif->dev = dev;

	netif->credit_bytes = netif->remaining_credit = ~0UL;
	netif->credit_usec = 0UL;
	init_timer(&netif->credit_timeout);
	/* Initialize 'expires' now: it's used to track the credit window. */
	netif->credit_timeout.expires = jiffies;

	init_timer(&netif->tx_queue_timeout);

	dev->hard_start_xmit = netif_be_start_xmit;
	dev->get_stats = netif_be_get_stats;
	dev->open = net_open;
	dev->stop = net_close;
	dev->change_mtu = netbk_change_mtu;
	dev->features = NETIF_F_IP_CSUM;

	SET_ETHTOOL_OPS(dev, &network_ethtool_ops);

	dev->tx_queue_len = netbk_queue_length;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	memset(dev->dev_addr, 0xFF, ETH_ALEN);
	dev->dev_addr[0] &= ~0x01;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	if (err) {
		DPRINTK("Could not register new net device %s: err=%d\n",
			dev->name, err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	DPRINTK("Successfully created netif\n");
	return netif;
}
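
/*
 * Map the frontend's granted tx and rx shared-ring pages into the
 * backend's tx/rx comms areas using grant-table map operations.
 */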
static int map_frontend_pages(
	netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr,
			  GNTMAP_host_map, tx_ring_ref, netif->domid);

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status) {
		DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
		return op.status;
	}

	netif->tx_shmem_ref = tx_ring_ref;
	netif->tx_shmem_handle = op.handle;

	gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr,
			  GNTMAP_host_map, rx_ring_ref, netif->domid);

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status) {
		DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
		return op.status;
	}

	netif->rx_shmem_ref = rx_ring_ref;
	netif->rx_shmem_handle = op.handle;

	return 0;
}
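
/*
 * Undo map_frontend_pages(): release the grant-table mappings of the
 * shared-ring pages.
 */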
static void unmap_frontend_pages(netif_t *netif)
{
	struct gnttab_unmap_grant_ref op;

	gnttab_set_unmap_op(&op, (unsigned long)netif->tx_comms_area->addr,
			    GNTMAP_host_map, netif->tx_shmem_handle);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	gnttab_set_unmap_op(&op, (unsigned long)netif->rx_comms_area->addr,
			    GNTMAP_host_map, netif->rx_shmem_handle);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();
}
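
/*
 * Connect to the frontend: allocate VM areas for the shared rings, map
 * the granted ring pages, bind the interdomain event channel, and bring
 * the interface up if the device is already open.
 */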
int netif_map(netif_t *netif, unsigned long tx_ring_ref,
	      unsigned long rx_ring_ref, unsigned int evtchn)
{
	int err = -ENOMEM;
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;

	/* Already connected through? */
	if (netif->irq)
		return 0;

	netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
	if (netif->tx_comms_area == NULL)
		return -ENOMEM;
	netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
	if (netif->rx_comms_area == NULL)
		goto err_rx;

	err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
	if (err)
		goto err_map;

	err = bind_interdomain_evtchn_to_irqhandler(
		netif->domid, evtchn, netif_be_int, 0,
		netif->dev->name, netif);
	if (err < 0)
		goto err_hypervisor;
	netif->irq = err;
	disable_irq(netif->irq);

	txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
	BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);

	rxs = (netif_rx_sring_t *)
		((char *)netif->rx_comms_area->addr);
	BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);

	netif->rx_req_cons_peek = 0;

	netif_get(netif);

	rtnl_lock();
	netif_carrier_on(netif->dev);
	if (netif_running(netif->dev))
		__netif_up(netif);
	rtnl_unlock();

	return 0;
err_hypervisor:
	unmap_frontend_pages(netif);
err_map:
	free_vm_area(netif->rx_comms_area);
err_rx:
	free_vm_area(netif->tx_comms_area);
	return err;
}
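
/*
 * Disconnect from the frontend: take the carrier down, drop our
 * reference and wait for remaining references to be released, then tear
 * down the timers, IRQ, shared rings and the net device itself.
 */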
void netif_disconnect(netif_t *netif)
{
	if (netif_carrier_ok(netif->dev)) {
		rtnl_lock();
		netif_carrier_off(netif->dev);
		if (netif_running(netif->dev))
			__netif_down(netif);
		rtnl_unlock();
		netif_put(netif);
	}

	atomic_dec(&netif->refcnt);
	wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);

	del_timer_sync(&netif->credit_timeout);
	del_timer_sync(&netif->tx_queue_timeout);

	if (netif->irq)
		unbind_from_irqhandler(netif->irq, netif);

	unregister_netdev(netif->dev);

	if (netif->tx.sring) {
		unmap_frontend_pages(netif);
		free_vm_area(netif->tx_comms_area);
		free_vm_area(netif->rx_comms_area);
	}

	free_netdev(netif->dev);
}