ia64/xen-unstable

view linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c @ 4764:fa155c185fd6

bitkeeper revision 1.1389.5.13 (42790489SjF8fj4T9jNcZdksbNpR4g)

Merge firebug.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into firebug.cl.cam.ac.uk:/local/scratch/smh22/xen-unstable.bk
author smh22@firebug.cl.cam.ac.uk
date Wed May 04 17:21:13 2005 +0000 (2005-05-04)
parents e238f090090a c483f767d98d
children 39bfbd5ae9b8 487de0451d2b
line source
1 /******************************************************************************
2 * Virtual network driver for conversing with remote driver backends.
3 *
4 * Copyright (c) 2002-2004, K A Fraser
5 *
6 * This file may be distributed separately from the Linux kernel, or
7 * incorporated into other software packages, subject to the following license:
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a copy
10 * of this source file (the "Software"), to deal in the Software without
11 * restriction, including without limitation the rights to use, copy, modify,
12 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
13 * and to permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice shall be included in
17 * all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
28 #include <linux/config.h>
29 #include <linux/module.h>
30 #include <linux/version.h>
31 #include <linux/kernel.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/string.h>
35 #include <linux/errno.h>
36 #include <linux/netdevice.h>
37 #include <linux/inetdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/skbuff.h>
40 #include <linux/init.h>
41 #include <linux/bitops.h>
42 #include <linux/proc_fs.h>
43 #include <net/sock.h>
44 #include <net/pkt_sched.h>
45 #include <net/arp.h>
46 #include <net/route.h>
47 #include <asm/io.h>
48 #include <asm-xen/evtchn.h>
49 #include <asm-xen/ctrl_if.h>
50 #include <asm-xen/xen-public/io/netif.h>
51 #include <asm-xen/balloon.h>
52 #include <asm/page.h>
53 #include <asm/uaccess.h>
55 #ifndef __GFP_NOWARN
56 #define __GFP_NOWARN 0
57 #endif
58 #define alloc_xen_skb(_l) __dev_alloc_skb((_l), GFP_ATOMIC|__GFP_NOWARN)
60 #define init_skb_shinfo(_skb) \
61 do { \
62 atomic_set(&(skb_shinfo(_skb)->dataref), 1); \
63 skb_shinfo(_skb)->nr_frags = 0; \
64 skb_shinfo(_skb)->frag_list = NULL; \
65 } while (0)
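/*
 * NB. The page behind a receive skb is handed over to Xen and comes back
 * holding whatever the backend placed there, so the skb_shared_info area
 * that lives just beyond the packet data must be reset (init_skb_shinfo)
 * before the skb is used again; netif_poll() does this before passing
 * packets up the stack.
 */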
67 /* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
68 #define RX_HEADROOM 200
70 /*
71 * If the backend driver is pipelining transmit requests then we can be very
72 * aggressive in avoiding new-packet notifications -- only need to send a
73 * notification if there are no outstanding unreceived responses.
74 * If the backend may be buffering our transmit buffers for any reason then we
75 * are rather more conservative.
76 */
77 #ifdef CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
78 #define TX_TEST_IDX resp_prod /* aggressive: any outstanding responses? */
79 #else
80 #define TX_TEST_IDX req_cons /* conservative: not seen all our requests? */
81 #endif
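/*
 * In network_start_xmit() the backend is notified (notify_via_evtchn) only
 * if the index chosen above still equals the slot just filled, i.e. only
 * when, by that measure, the backend has nothing else outstanding and may
 * need waking -- see the "Only notify Xen if we really have to" test below.
 */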
83 static void network_tx_buf_gc(struct net_device *dev);
84 static void network_alloc_rx_buffers(struct net_device *dev);
86 static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
87 static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
88 static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
90 #ifdef CONFIG_PROC_FS
91 static int xennet_proc_init(void);
92 static int xennet_proc_addif(struct net_device *dev);
93 static void xennet_proc_delif(struct net_device *dev);
94 #else
95 #define xennet_proc_init() (0)
96 #define xennet_proc_addif(d) (0)
97 #define xennet_proc_delif(d) ((void)0)
98 #endif
100 static struct list_head dev_list;
102 struct net_private
103 {
104 struct list_head list;
105 struct net_device *dev;
107 struct net_device_stats stats;
108 NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
109 unsigned int tx_full;
111 netif_tx_interface_t *tx;
112 netif_rx_interface_t *rx;
114 spinlock_t tx_lock;
115 spinlock_t rx_lock;
117 unsigned int handle;
118 unsigned int evtchn;
119 unsigned int irq;
121 /* What is the status of our connection to the remote backend? */
122 #define BEST_CLOSED 0
123 #define BEST_DISCONNECTED 1
124 #define BEST_CONNECTED 2
125 unsigned int backend_state;
127 /* Is this interface open or closed (down or up)? */
128 #define UST_CLOSED 0
129 #define UST_OPEN 1
130 unsigned int user_state;
132 /* Receive-ring batched refills. */
133 #define RX_MIN_TARGET 8
134 #define RX_MAX_TARGET NETIF_RX_RING_SIZE
135 int rx_min_target, rx_max_target, rx_target;
136 struct sk_buff_head rx_batch;
138 /*
139 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
140 * array is an index into a chain of free entries.
141 */
142 struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
143 struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
144 };
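/*
 * rx_target floats between rx_min_target and rx_max_target: it is doubled
 * by network_alloc_rx_buffers() when the ring risks running dry, and
 * decremented by netif_poll() when a callback arrives with few responses
 * pending ("exponential increase, linear decrease").
 */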
146 /* Access macros for acquiring/freeing slots in {tx,rx}_skbs[]. */
147 #define ADD_ID_TO_FREELIST(_list, _id) \
148 (_list)[(_id)] = (_list)[0]; \
149 (_list)[0] = (void *)(unsigned long)(_id);
150 #define GET_ID_FROM_FREELIST(_list) \
151 ({ unsigned long _id = (unsigned long)(_list)[0]; \
152 (_list)[0] = (_list)[_id]; \
153 (unsigned short)_id; })
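/*
 * The freelist is threaded through the array itself: entry 0 holds the
 * index of the first free slot and each free slot holds the index of the
 * next one. Roughly as used by network_start_xmit() and network_tx_buf_gc():
 *
 *     id = GET_ID_FROM_FREELIST(np->tx_skbs);    claim a free slot
 *     np->tx_skbs[id] = skb;                     slot is now in use
 *     ...
 *     ADD_ID_TO_FREELIST(np->tx_skbs, id);       release the slot again
 *
 * Free-slot indices are always below __PAGE_OFFSET, whereas skb pointers
 * are at or above it; network_connect() relies on this to tell the two
 * apart when rebuilding the rings after a resume.
 */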
155 static char *status_name[] = {
156 [NETIF_INTERFACE_STATUS_CLOSED] = "closed",
157 [NETIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
158 [NETIF_INTERFACE_STATUS_CONNECTED] = "connected",
159 [NETIF_INTERFACE_STATUS_CHANGED] = "changed",
160 };
162 static char *be_state_name[] = {
163 [BEST_CLOSED] = "closed",
164 [BEST_DISCONNECTED] = "disconnected",
165 [BEST_CONNECTED] = "connected",
166 };
168 #if DEBUG
169 #define DPRINTK(fmt, args...) \
170 printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
171 #else
172 #define DPRINTK(fmt, args...) ((void)0)
173 #endif
174 #define IPRINTK(fmt, args...) \
175 printk(KERN_INFO "xen_net: " fmt, ##args)
176 #define WPRINTK(fmt, args...) \
177 printk(KERN_WARNING "xen_net: " fmt, ##args)
179 static struct net_device *find_dev_by_handle(unsigned int handle)
180 {
181 struct list_head *ent;
182 struct net_private *np;
183 list_for_each (ent, &dev_list) {
184 np = list_entry(ent, struct net_private, list);
185 if (np->handle == handle)
186 return np->dev;
187 }
188 return NULL;
189 }
191 /** Network interface info. */
192 struct netif_ctrl {
193 /** Number of interfaces. */
194 int interface_n;
195 /** Number of connected interfaces. */
196 int connected_n;
197 /** Error code. */
198 int err;
199 int up;
200 };
202 static struct netif_ctrl netctrl;
204 static void netctrl_init(void)
205 {
206 memset(&netctrl, 0, sizeof(netctrl));
207 netctrl.up = NETIF_DRIVER_STATUS_DOWN;
208 }
210 /** Get or set a network interface error.
211 */
212 static int netctrl_err(int err)
213 {
214 if ((err < 0) && !netctrl.err)
215 netctrl.err = err;
216 return netctrl.err;
217 }
219 /** Test if all network interfaces are connected.
220 *
221 * @return 1 if all connected, 0 if not, negative error code otherwise
222 */
223 static int netctrl_connected(void)
224 {
225 int ok;
227 if (netctrl.err)
228 ok = netctrl.err;
229 else if (netctrl.up == NETIF_DRIVER_STATUS_UP)
230 ok = (netctrl.connected_n == netctrl.interface_n);
231 else
232 ok = 0;
234 return ok;
235 }
237 /** Count the connected network interfaces.
238 *
239 * @return connected count
240 */
241 static int netctrl_connected_count(void)
242 {
244 struct list_head *ent;
245 struct net_private *np;
246 unsigned int connected;
248 connected = 0;
250 list_for_each(ent, &dev_list) {
251 np = list_entry(ent, struct net_private, list);
252 if (np->backend_state == BEST_CONNECTED)
253 connected++;
254 }
256 netctrl.connected_n = connected;
257 DPRINTK("> connected_n=%d interface_n=%d\n",
258 netctrl.connected_n, netctrl.interface_n);
259 return connected;
260 }
262 /** Send a packet on a net device to encourage switches to learn the
263 * MAC. We send a fake ARP request.
264 *
265 * @param dev device
266 * @return 0 on success, error code otherwise
267 */
268 static int send_fake_arp(struct net_device *dev)
269 {
270 struct sk_buff *skb;
271 u32 src_ip, dst_ip;
273 dst_ip = INADDR_BROADCAST;
274 src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
276 /* No IP? Then nothing to do. */
277 if (src_ip == 0)
278 return 0;
280 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
281 dst_ip, dev, src_ip,
282 /*dst_hw*/ NULL, /*src_hw*/ NULL,
283 /*target_hw*/ dev->dev_addr);
284 if (skb == NULL)
285 return -ENOMEM;
287 return dev_queue_xmit(skb);
288 }
290 static int network_open(struct net_device *dev)
291 {
292 struct net_private *np = netdev_priv(dev);
294 memset(&np->stats, 0, sizeof(np->stats));
296 np->user_state = UST_OPEN;
298 network_alloc_rx_buffers(dev);
299 np->rx->event = np->rx_resp_cons + 1;
301 netif_start_queue(dev);
303 return 0;
304 }
306 static void network_tx_buf_gc(struct net_device *dev)
307 {
308 NETIF_RING_IDX i, prod;
309 unsigned short id;
310 struct net_private *np = netdev_priv(dev);
311 struct sk_buff *skb;
313 if (np->backend_state != BEST_CONNECTED)
314 return;
316 do {
317 prod = np->tx->resp_prod;
318 rmb(); /* Ensure we see responses up to 'prod'. */
320 for (i = np->tx_resp_cons; i != prod; i++) {
321 id = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
322 skb = np->tx_skbs[id];
323 ADD_ID_TO_FREELIST(np->tx_skbs, id);
324 dev_kfree_skb_irq(skb);
325 }
327 np->tx_resp_cons = prod;
329 /*
330 * Set a new event, then check for race with update of tx_cons. Note
331 * that it is essential to schedule a callback, no matter how few
332 * buffers are pending. Even if there is space in the transmit ring,
333 * higher layers may be blocked because too much data is outstanding:
334 * in such cases notification from Xen is likely to be the only kick
335 * that we'll get.
336 */
337 np->tx->event =
338 prod + ((np->tx->req_prod - prod) >> 1) + 1;
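/* I.e. ask for the next notification roughly half-way through the
 * requests that are still outstanding. */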
339 mb();
340 } while (prod != np->tx->resp_prod);
342 if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
343 np->tx_full = 0;
344 if (np->user_state == UST_OPEN)
345 netif_wake_queue(dev);
346 }
347 }
350 static void network_alloc_rx_buffers(struct net_device *dev)
351 {
352 unsigned short id;
353 struct net_private *np = netdev_priv(dev);
354 struct sk_buff *skb;
355 int i, batch_target;
356 NETIF_RING_IDX req_prod = np->rx->req_prod;
358 if (unlikely(np->backend_state != BEST_CONNECTED))
359 return;
361 /*
362 * Allocate skbuffs greedily, even though we batch updates to the
363 * receive ring. This creates a less bursty demand on the memory allocator,
364 * so should reduce the chance of failed allocation requests both for
365 * ourselves and for other kernel subsystems.
366 */
367 batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
368 for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
369 if (unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL))
370 break;
371 __skb_queue_tail(&np->rx_batch, skb);
372 }
374 /* Is the batch large enough to be worthwhile? */
375 if (i < (np->rx_target/2))
376 return;
378 for (i = 0; ; i++) {
379 if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
380 break;
382 skb->dev = dev;
384 id = GET_ID_FROM_FREELIST(np->rx_skbs);
386 np->rx_skbs[id] = skb;
388 np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
390 rx_pfn_array[i] = virt_to_machine(skb->head) >> PAGE_SHIFT;
392 /* Remove this page from pseudo phys map before passing back to Xen. */
393 phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT]
394 = INVALID_P2M_ENTRY;
396 rx_mcl[i].op = __HYPERVISOR_update_va_mapping;
397 rx_mcl[i].args[0] = (unsigned long)skb->head;
398 rx_mcl[i].args[1] = 0;
399 rx_mcl[i].args[2] = 0;
400 }
402 /* After all PTEs have been zapped we blow away stale TLB entries. */
403 rx_mcl[i-1].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
405 /* Give away a batch of pages. */
406 rx_mcl[i].op = __HYPERVISOR_dom_mem_op;
407 rx_mcl[i].args[0] = MEMOP_decrease_reservation;
408 rx_mcl[i].args[1] = (unsigned long)rx_pfn_array;
409 rx_mcl[i].args[2] = (unsigned long)i;
410 rx_mcl[i].args[3] = 0;
411 rx_mcl[i].args[4] = DOMID_SELF;
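/*
 * The arguments above are the memory op, the array of page frames being
 * handed back, their count, what is presumably the extent order (0, i.e.
 * single pages), and the target domain. The multicall stores the
 * hypercall's return value in args[5] of the entry, which the check
 * further down compares against 'i'.
 */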
413 /* Tell the balloon driver what is going on. */
414 balloon_update_driver_allowance(i);
416 /* Zap PTEs and give away pages in one big multicall. */
417 (void)HYPERVISOR_multicall(rx_mcl, i+1);
419 /* Check return status of HYPERVISOR_dom_mem_op(). */
420 if (unlikely(rx_mcl[i].args[5] != i))
421 panic("Unable to reduce memory reservation\n");
423 /* Above is a suitable barrier to ensure backend will see requests. */
424 np->rx->req_prod = req_prod + i;
426 /* Adjust our floating fill target if we risked running out of buffers. */
427 if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
428 ((np->rx_target *= 2) > np->rx_max_target))
429 np->rx_target = np->rx_max_target;
430 }
433 static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
434 {
435 unsigned short id;
436 struct net_private *np = netdev_priv(dev);
437 netif_tx_request_t *tx;
438 NETIF_RING_IDX i;
440 if (unlikely(np->tx_full)) {
441 printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
442 netif_stop_queue(dev);
443 goto drop;
444 }
446 if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
447 PAGE_SIZE)) {
448 struct sk_buff *nskb;
449 if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
450 goto drop;
451 skb_put(nskb, skb->len);
452 memcpy(nskb->data, skb->data, skb->len);
453 nskb->dev = skb->dev;
454 dev_kfree_skb(skb);
455 skb = nskb;
456 }
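/*
 * The copy above is needed because each transmit request hands the backend
 * a single machine address and length, so the payload must not straddle a
 * page boundary.
 */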
458 spin_lock_irq(&np->tx_lock);
460 if (np->backend_state != BEST_CONNECTED) {
461 spin_unlock_irq(&np->tx_lock);
462 goto drop;
463 }
465 i = np->tx->req_prod;
467 id = GET_ID_FROM_FREELIST(np->tx_skbs);
468 np->tx_skbs[id] = skb;
470 tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
472 tx->id = id;
473 tx->addr = virt_to_machine(skb->data);
474 tx->size = skb->len;
476 wmb(); /* Ensure that backend will see the request. */
477 np->tx->req_prod = i + 1;
479 network_tx_buf_gc(dev);
481 if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
482 np->tx_full = 1;
483 netif_stop_queue(dev);
484 }
486 spin_unlock_irq(&np->tx_lock);
488 np->stats.tx_bytes += skb->len;
489 np->stats.tx_packets++;
491 /* Only notify Xen if we really have to. */
492 mb();
493 if (np->tx->TX_TEST_IDX == i)
494 notify_via_evtchn(np->evtchn);
496 return 0;
498 drop:
499 np->stats.tx_dropped++;
500 dev_kfree_skb(skb);
501 return 0;
502 }
504 static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
505 {
506 struct net_device *dev = dev_id;
507 struct net_private *np = netdev_priv(dev);
508 unsigned long flags;
510 spin_lock_irqsave(&np->tx_lock, flags);
511 network_tx_buf_gc(dev);
512 spin_unlock_irqrestore(&np->tx_lock, flags);
514 if ((np->rx_resp_cons != np->rx->resp_prod) && (np->user_state == UST_OPEN))
515 netif_rx_schedule(dev);
517 return IRQ_HANDLED;
518 }
521 static int netif_poll(struct net_device *dev, int *pbudget)
522 {
523 struct net_private *np = netdev_priv(dev);
524 struct sk_buff *skb, *nskb;
525 netif_rx_response_t *rx;
526 NETIF_RING_IDX i, rp;
527 mmu_update_t *mmu = rx_mmu;
528 multicall_entry_t *mcl = rx_mcl;
529 int work_done, budget, more_to_do = 1;
530 struct sk_buff_head rxq;
531 unsigned long flags;
533 spin_lock(&np->rx_lock);
535 if (np->backend_state != BEST_CONNECTED) {
536 spin_unlock(&np->rx_lock);
537 return 0;
538 }
540 skb_queue_head_init(&rxq);
542 if ((budget = *pbudget) > dev->quota)
543 budget = dev->quota;
545 rp = np->rx->resp_prod;
546 rmb(); /* Ensure we see queued responses up to 'rp'. */
548 for (i = np->rx_resp_cons, work_done = 0;
549 (i != rp) && (work_done < budget);
550 i++, work_done++) {
551 rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
553 /*
554 * An error here is very odd. Usually indicates a backend bug,
555 * low-memory condition, or that we didn't have reservation headroom.
556 */
557 if (unlikely(rx->status <= 0)) {
558 if (net_ratelimit())
559 printk(KERN_WARNING "Bad rx buffer (memory squeeze?).\n");
560 np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
561 wmb();
562 np->rx->req_prod++;
563 work_done--;
564 continue;
565 }
567 skb = np->rx_skbs[rx->id];
568 ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
570 /* NB. We handle skb overflow later. */
571 skb->data = skb->head + (rx->addr & ~PAGE_MASK);
572 skb->len = rx->status;
573 skb->tail = skb->data + skb->len;
575 np->stats.rx_packets++;
576 np->stats.rx_bytes += rx->status;
578 /* Remap the page. */
579 mmu->ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
580 mmu->val = __pa(skb->head) >> PAGE_SHIFT;
581 mmu++;
582 mcl->op = __HYPERVISOR_update_va_mapping;
583 mcl->args[0] = (unsigned long)skb->head;
584 mcl->args[1] = (rx->addr & PAGE_MASK) | __PAGE_KERNEL;
585 mcl->args[2] = 0;
586 mcl++;
588 phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] =
589 rx->addr >> PAGE_SHIFT;
591 __skb_queue_tail(&rxq, skb);
592 }
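/*
 * At this point every accepted response has an update_va_mapping entry and
 * an M2P update queued on rx_mcl/rx_mmu, and its skb queued on rxq; the
 * whole batch is committed by the single multicall below.
 */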
594 /* Some pages are no longer absent... */
595 balloon_update_driver_allowance(-work_done);
597 /* Do all the remapping work, and M->P updates, in one big hypercall. */
598 if (likely((mcl - rx_mcl) != 0)) {
599 mcl->op = __HYPERVISOR_mmu_update;
600 mcl->args[0] = (unsigned long)rx_mmu;
601 mcl->args[1] = mmu - rx_mmu;
602 mcl->args[2] = 0;
603 mcl->args[3] = DOMID_SELF;
604 mcl++;
605 (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
606 }
608 while ((skb = __skb_dequeue(&rxq)) != NULL) {
609 /*
610 * Enough room in skbuff for the data we were passed? Also, Linux
611 * expects at least 16 bytes headroom in each receive buffer.
612 */
613 if (unlikely(skb->tail > skb->end) ||
614 unlikely((skb->data - skb->head) < 16)) {
615 nskb = NULL;
617 /* Only copy the packet if it fits in the current MTU. */
618 if (skb->len <= (dev->mtu + ETH_HLEN)) {
619 if ((skb->tail > skb->end) && net_ratelimit())
620 printk(KERN_INFO "Received packet needs %d bytes more "
621 "headroom.\n", skb->tail - skb->end);
623 if ((nskb = alloc_xen_skb(skb->len + 2)) != NULL) {
624 skb_reserve(nskb, 2);
625 skb_put(nskb, skb->len);
626 memcpy(nskb->data, skb->data, skb->len);
627 nskb->dev = skb->dev;
628 }
629 }
630 else if (net_ratelimit())
631 printk(KERN_INFO "Received packet too big for MTU "
632 "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu);
634 /* Reinitialise and then destroy the old skbuff. */
635 skb->len = 0;
636 skb->tail = skb->data;
637 init_skb_shinfo(skb);
638 dev_kfree_skb(skb);
640 /* Switch old for new, if we copied the buffer. */
641 if ((skb = nskb) == NULL)
642 continue;
643 }
645 /* Set the shared-info area, which is hidden behind the real data. */
646 init_skb_shinfo(skb);
648 /* Ethernet-specific work. Delayed to here as it peeks the header. */
649 skb->protocol = eth_type_trans(skb, dev);
651 /* Pass it up. */
652 netif_receive_skb(skb);
653 dev->last_rx = jiffies;
654 }
656 np->rx_resp_cons = i;
658 /* If we get a callback with very few responses, reduce fill target. */
659 /* NB. Note exponential increase, linear decrease. */
660 if (((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
661 (--np->rx_target < np->rx_min_target))
662 np->rx_target = np->rx_min_target;
664 network_alloc_rx_buffers(dev);
666 *pbudget -= work_done;
667 dev->quota -= work_done;
669 if (work_done < budget) {
670 local_irq_save(flags);
672 np->rx->event = i + 1;
674 /* Deal with hypervisor racing our resetting of rx_event. */
675 mb();
676 if (np->rx->resp_prod == i) {
677 __netif_rx_complete(dev);
678 more_to_do = 0;
679 }
681 local_irq_restore(flags);
682 }
684 spin_unlock(&np->rx_lock);
686 return more_to_do;
687 }
690 static int network_close(struct net_device *dev)
691 {
692 struct net_private *np = netdev_priv(dev);
693 np->user_state = UST_CLOSED;
694 netif_stop_queue(np->dev);
695 return 0;
696 }
699 static struct net_device_stats *network_get_stats(struct net_device *dev)
700 {
701 struct net_private *np = netdev_priv(dev);
702 return &np->stats;
703 }
706 static void network_connect(struct net_device *dev,
707 netif_fe_interface_status_t *status)
708 {
709 struct net_private *np;
710 int i, requeue_idx;
711 netif_tx_request_t *tx;
713 np = netdev_priv(dev);
714 spin_lock_irq(&np->tx_lock);
715 spin_lock(&np->rx_lock);
717 /* Recovery procedure: */
719 /* Step 1: Reinitialise variables. */
720 np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
721 np->rx->event = np->tx->event = 1;
723 /* Step 2: Rebuild the RX and TX ring contents.
724 * NB. We could just free the queued TX packets now but we hope
725 * that sending them out might do some good. We have to rebuild
726 * the RX ring because some of our pages are currently flipped out
727 * so we can't just free the RX skbs.
728 * NB2. Freelist index entries are always going to be less than
729 * __PAGE_OFFSET, whereas pointers to skbs will always be equal or
730 * greater than __PAGE_OFFSET: we use this property to distinguish
731 * them.
732 */
734 /* Rebuild the TX buffer freelist and the TX ring itself.
735 * NB. This reorders packets. We could keep more private state
736 * to avoid this but maybe it doesn't matter so much given the
737 * interface has been down.
738 */
739 for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
740 if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
741 struct sk_buff *skb = np->tx_skbs[i];
743 tx = &np->tx->ring[requeue_idx++].req;
745 tx->id = i;
746 tx->addr = virt_to_machine(skb->data);
747 tx->size = skb->len;
749 np->stats.tx_bytes += skb->len;
750 np->stats.tx_packets++;
751 }
752 }
753 wmb();
754 np->tx->req_prod = requeue_idx;
756 /* Rebuild the RX buffer freelist and the RX ring itself. */
757 for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++)
758 if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET)
759 np->rx->ring[requeue_idx++].req.id = i;
760 wmb();
761 np->rx->req_prod = requeue_idx;
763 /* Step 3: All public and private state should now be sane. Get
764 * ready to start sending and receiving packets and give the driver
765 * domain a kick because we've probably just requeued some
766 * packets.
767 */
768 np->backend_state = BEST_CONNECTED;
769 wmb();
770 notify_via_evtchn(status->evtchn);
771 network_tx_buf_gc(dev);
773 if (np->user_state == UST_OPEN)
774 netif_start_queue(dev);
776 spin_unlock(&np->rx_lock);
777 spin_unlock_irq(&np->tx_lock);
778 }
780 static void vif_show(struct net_private *np)
781 {
782 #if DEBUG
783 if (np) {
784 IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n",
785 np->handle,
786 be_state_name[np->backend_state],
787 np->user_state ? "open" : "closed",
788 np->evtchn,
789 np->irq,
790 np->tx,
791 np->rx);
792 } else {
793 IPRINTK("<vif NULL>\n");
794 }
795 #endif
796 }
798 /* Send a connect message to xend to tell it to bring up the interface. */
799 static void send_interface_connect(struct net_private *np)
800 {
801 ctrl_msg_t cmsg = {
802 .type = CMSG_NETIF_FE,
803 .subtype = CMSG_NETIF_FE_INTERFACE_CONNECT,
804 .length = sizeof(netif_fe_interface_connect_t),
805 };
806 netif_fe_interface_connect_t *msg = (void*)cmsg.msg;
808 msg->handle = np->handle;
809 msg->tx_shmem_frame = (virt_to_machine(np->tx) >> PAGE_SHIFT);
810 msg->rx_shmem_frame = (virt_to_machine(np->rx) >> PAGE_SHIFT);
812 ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
813 }
815 /* Send a driver status notification to the domain controller. */
816 static int send_driver_status(int ok)
817 {
818 int err = 0;
819 ctrl_msg_t cmsg = {
820 .type = CMSG_NETIF_FE,
821 .subtype = CMSG_NETIF_FE_DRIVER_STATUS,
822 .length = sizeof(netif_fe_driver_status_t),
823 };
824 netif_fe_driver_status_t *msg = (void*)cmsg.msg;
826 msg->status = (ok ? NETIF_DRIVER_STATUS_UP : NETIF_DRIVER_STATUS_DOWN);
827 err = ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
828 return err;
829 }
831 /* Stop network device and free tx/rx queues and irq.
832 */
833 static void vif_release(struct net_private *np)
834 {
835 /* Stop old i/f to prevent errors whilst we rebuild the state. */
836 spin_lock_irq(&np->tx_lock);
837 spin_lock(&np->rx_lock);
838 netif_stop_queue(np->dev);
839 /* np->backend_state = BEST_DISCONNECTED; */
840 spin_unlock(&np->rx_lock);
841 spin_unlock_irq(&np->tx_lock);
843 /* Free resources. */
844 if(np->tx != NULL){
845 free_irq(np->irq, np->dev);
846 unbind_evtchn_from_irq(np->evtchn);
847 free_page((unsigned long)np->tx);
848 free_page((unsigned long)np->rx);
849 np->irq = 0;
850 np->evtchn = 0;
851 np->tx = NULL;
852 np->rx = NULL;
853 }
854 }
856 /* Release vif resources and close it down completely.
857 */
858 static void vif_close(struct net_private *np)
859 {
860 WPRINTK("Unexpected netif-CLOSED message in state %s\n",
861 be_state_name[np->backend_state]);
862 vif_release(np);
863 np->backend_state = BEST_CLOSED;
864 /* todo: take dev down and free. */
865 vif_show(np);
866 }
868 /* Move the vif into disconnected state.
869 * Allocates tx/rx pages.
870 * Sends connect message to xend.
871 */
872 static void vif_disconnect(struct net_private *np)
873 {
874 if(np->tx) free_page((unsigned long)np->tx);
875 if(np->rx) free_page((unsigned long)np->rx);
876 // Before this np->tx and np->rx had better be null.
877 np->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
878 np->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
879 memset(np->tx, 0, PAGE_SIZE);
880 memset(np->rx, 0, PAGE_SIZE);
881 np->backend_state = BEST_DISCONNECTED;
882 send_interface_connect(np);
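/*
 * The shared pages are passed to xend as machine frame numbers by
 * send_interface_connect(); the handshake completes when a
 * NETIF_INTERFACE_STATUS_CONNECTED message arrives and vif_connect()
 * binds the event channel.
 */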
883 vif_show(np);
884 }
886 /* Begin interface recovery.
887 *
888 * NB. Whilst we're recovering, we turn the carrier state off. We
889 * take measures to ensure that this device isn't used for
890 * anything. We also stop the queue for this device. Various
891 * different approaches (e.g. continuing to buffer packets) have
892 * been tested but don't appear to improve the overall impact on
893 * TCP connections.
894 *
895 * TODO: (MAW) Change the Xend<->Guest protocol so that a recovery
896 * is initiated by a special "RESET" message - disconnect could
897 * just mean we're not allowed to use this interface any more.
898 */
899 static void vif_reset(struct net_private *np)
900 {
901 IPRINTK("Attempting to reconnect network interface: handle=%u\n",
902 np->handle);
903 vif_release(np);
904 vif_disconnect(np);
905 vif_show(np);
906 }
908 /* Move the vif into connected state.
909 * Sets the mac and event channel from the message.
910 * Binds the irq to the event channel.
911 */
912 static void
913 vif_connect(struct net_private *np, netif_fe_interface_status_t *status)
914 {
915 struct net_device *dev = np->dev;
916 memcpy(dev->dev_addr, status->mac, ETH_ALEN);
917 network_connect(dev, status);
918 np->evtchn = status->evtchn;
919 np->irq = bind_evtchn_to_irq(np->evtchn);
920 (void)request_irq(np->irq, netif_int, SA_SAMPLE_RANDOM, dev->name, dev);
921 netctrl_connected_count();
922 (void)send_fake_arp(dev);
923 vif_show(np);
924 }
927 /** Create a network device.
928 * @param handle device handle
929 * @param val return parameter for created device
930 * @return 0 on success, error code otherwise
931 */
932 static int create_netdev(int handle, struct net_device **val)
933 {
934 int i, err = 0;
935 struct net_device *dev = NULL;
936 struct net_private *np = NULL;
938 if ((dev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
939 printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
940 err = -ENOMEM;
941 goto exit;
942 }
944 np = netdev_priv(dev);
945 np->backend_state = BEST_CLOSED;
946 np->user_state = UST_CLOSED;
947 np->handle = handle;
949 spin_lock_init(&np->tx_lock);
950 spin_lock_init(&np->rx_lock);
952 skb_queue_head_init(&np->rx_batch);
953 np->rx_target = RX_MIN_TARGET;
954 np->rx_min_target = RX_MIN_TARGET;
955 np->rx_max_target = RX_MAX_TARGET;
957 /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
958 for (i = 0; i <= NETIF_TX_RING_SIZE; i++)
959 np->tx_skbs[i] = (void *)(i+1);
960 for (i = 0; i <= NETIF_RX_RING_SIZE; i++)
961 np->rx_skbs[i] = (void *)(i+1);
963 dev->open = network_open;
964 dev->hard_start_xmit = network_start_xmit;
965 dev->stop = network_close;
966 dev->get_stats = network_get_stats;
967 dev->poll = netif_poll;
968 dev->weight = 64;
970 if ((err = register_netdev(dev)) != 0) {
971 printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
972 goto exit;
973 }
975 if ((err = xennet_proc_addif(dev)) != 0) {
976 unregister_netdev(dev);
977 goto exit;
978 }
980 np->dev = dev;
981 list_add(&np->list, &dev_list);
983 exit:
984 if ((err != 0) && (dev != NULL))
985 kfree(dev);
986 else if (val != NULL)
987 *val = dev;
988 return err;
989 }
991 /* Get the target interface for a status message.
992 * Creates the interface when it makes sense.
993 * The returned interface may be null when there is no error.
994 *
995 * @param status status message
996 * @param np return parameter for interface state
997 * @return 0 on success, error code otherwise
998 */
999 static int
1000 target_vif(netif_fe_interface_status_t *status, struct net_private **np)
1001 {
1002 int err = 0;
1003 struct net_device *dev;
1005 DPRINTK("> handle=%d\n", status->handle);
1006 if (status->handle < 0) {
1007 err = -EINVAL;
1008 goto exit;
1009 }
1011 if ((dev = find_dev_by_handle(status->handle)) != NULL)
1012 goto exit;
1014 if (status->status == NETIF_INTERFACE_STATUS_CLOSED)
1015 goto exit;
1016 if (status->status == NETIF_INTERFACE_STATUS_CHANGED)
1017 goto exit;
1019 /* It's a new interface in a good state - create it. */
1020 DPRINTK("> create device...\n");
1021 if ((err = create_netdev(status->handle, &dev)) != 0)
1022 goto exit;
1024 netctrl.interface_n++;
1026 exit:
1027 if (np != NULL)
1028 *np = ((dev && !err) ? netdev_priv(dev) : NULL);
1029 DPRINTK("< err=%d\n", err);
1030 return err;
1031 }
1033 /* Handle an interface status message. */
1034 static void netif_interface_status(netif_fe_interface_status_t *status)
1035 {
1036 int err = 0;
1037 struct net_private *np = NULL;
1039 DPRINTK("> status=%s handle=%d\n",
1040 status_name[status->status], status->handle);
1042 if ((err = target_vif(status, &np)) != 0) {
1043 WPRINTK("Invalid netif: handle=%u\n", status->handle);
1044 return;
1045 }
1047 if (np == NULL) {
1048 DPRINTK("> no vif\n");
1049 return;
1050 }
1052 switch (status->status) {
1053 case NETIF_INTERFACE_STATUS_CLOSED:
1054 switch (np->backend_state) {
1055 case BEST_CLOSED:
1056 case BEST_DISCONNECTED:
1057 case BEST_CONNECTED:
1058 vif_close(np);
1059 break;
1060 }
1061 break;
1063 case NETIF_INTERFACE_STATUS_DISCONNECTED:
1064 switch (np->backend_state) {
1065 case BEST_CLOSED:
1066 vif_disconnect(np);
1067 break;
1068 case BEST_DISCONNECTED:
1069 case BEST_CONNECTED:
1070 vif_reset(np);
1071 break;
1072 }
1073 break;
1075 case NETIF_INTERFACE_STATUS_CONNECTED:
1076 switch (np->backend_state) {
1077 case BEST_CLOSED:
1078 WPRINTK("Unexpected netif status %s in state %s\n",
1079 status_name[status->status],
1080 be_state_name[np->backend_state]);
1081 vif_disconnect(np);
1082 vif_connect(np, status);
1083 break;
1084 case BEST_DISCONNECTED:
1085 vif_connect(np, status);
1086 break;
1087 }
1088 break;
1090 case NETIF_INTERFACE_STATUS_CHANGED:
1091 /*
1092 * The domain controller is notifying us that a device has been
1093 * added or removed.
1094 */
1095 break;
1097 default:
1098 WPRINTK("Invalid netif status code %d\n", status->status);
1099 break;
1100 }
1102 vif_show(np);
1103 }
1105 /*
1106 * Initialize the network control interface.
1107 */
1108 static void netif_driver_status(netif_fe_driver_status_t *status)
1109 {
1110 netctrl.up = status->status;
1111 netctrl_connected_count();
1112 }
1114 /* Receive handler for control messages. */
1115 static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
1116 {
1118 switch (msg->subtype) {
1119 case CMSG_NETIF_FE_INTERFACE_STATUS:
1120 netif_interface_status((netif_fe_interface_status_t *) &msg->msg[0]);
1121 break;
1123 case CMSG_NETIF_FE_DRIVER_STATUS:
1124 netif_driver_status((netif_fe_driver_status_t *) &msg->msg[0]);
1125 break;
1127 default:
1128 msg->length = 0;
1129 break;
1130 }
1132 ctrl_if_send_response(msg);
1133 }
1136 #if 1
1137 /* Wait for all interfaces to be connected.
1139 * This works OK, but we'd like to use the probing mode (see below).
1140 */
1141 static int probe_interfaces(void)
1142 {
1143 int err = 0, conn = 0;
1144 int wait_i, wait_n = 100;
1146 DPRINTK(">\n");
1148 for (wait_i = 0; wait_i < wait_n; wait_i++) {
1149 DPRINTK("> wait_i=%d\n", wait_i);
1150 conn = netctrl_connected();
1151 if(conn) break;
1152 DPRINTK("> schedule_timeout...\n");
1153 set_current_state(TASK_INTERRUPTIBLE);
1154 schedule_timeout(10);
1155 }
1157 DPRINTK("> wait finished...\n");
1158 if (conn <= 0) {
1159 err = netctrl_err(-ENETDOWN);
1160 WPRINTK("Failed to connect all virtual interfaces: err=%d\n", err);
1161 }
1163 DPRINTK("< err=%d\n", err);
1165 return err;
1166 }
1167 #else
1168 /* Probe for interfaces until no more are found.
1170 * This is the mode we'd like to use, but at the moment it panics the kernel.
1171 */
1172 static int probe_interfaces(void)
1173 {
1174 int err = 0;
1175 int wait_i, wait_n = 100;
1176 ctrl_msg_t cmsg = {
1177 .type = CMSG_NETIF_FE,
1178 .subtype = CMSG_NETIF_FE_INTERFACE_STATUS,
1179 .length = sizeof(netif_fe_interface_status_t),
1180 };
1181 netif_fe_interface_status_t msg = {};
1182 ctrl_msg_t rmsg = {};
1183 netif_fe_interface_status_t *reply = (void*)rmsg.msg;
1184 int state = TASK_UNINTERRUPTIBLE;
1185 u32 query = -1;
1187 DPRINTK(">\n");
1189 netctrl.interface_n = 0;
1190 for (wait_i = 0; wait_i < wait_n; wait_i++) {
1191 DPRINTK("> wait_i=%d query=%d\n", wait_i, query);
1192 msg.handle = query;
1193 memcpy(cmsg.msg, &msg, sizeof(msg));
1194 DPRINTK("> set_current_state...\n");
1195 set_current_state(state);
1196 DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
1197 DPRINTK("> sending...\n");
1198 err = ctrl_if_send_message_and_get_response(&cmsg, &rmsg, state);
1199 DPRINTK("> err=%d\n", err);
1200 if(err) goto exit;
1201 DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
1202 if((int)reply->handle < 0) {
1203 // No more interfaces.
1204 break;
1205 }
1206 query = -reply->handle - 2;
1207 DPRINTK(">netif_interface_status ...\n");
1208 netif_interface_status(reply);
1209 }
1211 exit:
1212 if (err) {
1213 err = netctrl_err(-ENETDOWN);
1214 WPRINTK("Connecting virtual network interfaces failed: err=%d\n", err);
1215 }
1217 DPRINTK("< err=%d\n", err);
1218 return err;
1219 }
1221 #endif
1223 /*
1224 * We use this notifier to send out a fake ARP reply to reset switches and
1225 * router ARP caches when an IP interface is brought up on a VIF.
1226 */
1227 static int
1228 inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
1229 {
1230 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
1231 struct net_device *dev = ifa->ifa_dev->dev;
1232 struct list_head *ent;
1233 struct net_private *np;
1235 if (event != NETDEV_UP)
1236 goto out;
1238 list_for_each (ent, &dev_list) {
1239 np = list_entry(ent, struct net_private, list);
1240 if (np->dev == dev)
1241 (void)send_fake_arp(dev);
1242 }
1244 out:
1245 return NOTIFY_DONE;
1246 }
1248 static struct notifier_block notifier_inetdev = {
1249 .notifier_call = inetdev_notify,
1250 .next = NULL,
1251 .priority = 0
1252 };
1254 static int __init netif_init(void)
1255 {
1256 int err = 0;
1258 if (xen_start_info.flags & SIF_INITDOMAIN)
1259 return 0;
1261 if ((err = xennet_proc_init()) != 0)
1262 return err;
1264 IPRINTK("Initialising virtual ethernet driver.\n");
1265 INIT_LIST_HEAD(&dev_list);
1266 (void)register_inetaddr_notifier(&notifier_inetdev);
1267 netctrl_init();
1268 (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx,
1269 CALLBACK_IN_BLOCKING_CONTEXT);
1270 send_driver_status(1);
1271 err = probe_interfaces();
1272 if (err)
1273 ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);
1275 DPRINTK("< err=%d\n", err);
1276 return err;
1277 }
1279 static void vif_suspend(struct net_private *np)
1280 {
1281 /* Avoid having tx/rx stuff happen until we're ready. */
1282 free_irq(np->irq, np->dev);
1283 unbind_evtchn_from_irq(np->evtchn);
1284 }
1286 static void vif_resume(struct net_private *np)
1287 {
1288 /*
1289 * Connect regardless of whether IFF_UP flag set.
1290 * Stop bad things from happening until we're back up.
1291 */
1292 np->backend_state = BEST_DISCONNECTED;
1293 memset(np->tx, 0, PAGE_SIZE);
1294 memset(np->rx, 0, PAGE_SIZE);
1296 send_interface_connect(np);
1297 }
1299 void netif_suspend(void)
1300 {
1301 struct list_head *ent;
1302 struct net_private *np;
1304 list_for_each (ent, &dev_list) {
1305 np = list_entry(ent, struct net_private, list);
1306 vif_suspend(np);
1307 }
1308 }
1310 void netif_resume(void)
1311 {
1312 struct list_head *ent;
1313 struct net_private *np;
1315 list_for_each (ent, &dev_list) {
1316 np = list_entry(ent, struct net_private, list);
1317 vif_resume(np);
1318 }
1319 }
1321 #ifdef CONFIG_PROC_FS
1323 #define TARGET_MIN 0UL
1324 #define TARGET_MAX 1UL
1325 #define TARGET_CUR 2UL
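/*
 * The /proc handlers below recover both the net_device and the selected
 * target from a single 'data' pointer: the pointer is at least 4-byte
 * aligned, so its two low bits are free to carry one of the TARGET_*
 * values (hence the "& ~3UL" and "& 3" below).
 */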
1327 static int xennet_proc_read(
1328 char *page, char **start, off_t off, int count, int *eof, void *data)
1329 {
1330 struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
1331 struct net_private *np = netdev_priv(dev);
1332 int len = 0, which_target = (int)data & 3;
1334 switch (which_target)
1335 {
1336 case TARGET_MIN:
1337 len = sprintf(page, "%d\n", np->rx_min_target);
1338 break;
1339 case TARGET_MAX:
1340 len = sprintf(page, "%d\n", np->rx_max_target);
1341 break;
1342 case TARGET_CUR:
1343 len = sprintf(page, "%d\n", np->rx_target);
1344 break;
1345 }
1347 *eof = 1;
1348 return len;
1349 }
1351 static int xennet_proc_write(
1352 struct file *file, const char __user *buffer,
1353 unsigned long count, void *data)
1354 {
1355 struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
1356 struct net_private *np = netdev_priv(dev);
1357 int which_target = (int)data & 3;
1358 char string[64];
1359 long target;
1361 if (!capable(CAP_SYS_ADMIN))
1362 return -EPERM;
1364 if (count <= 1)
1365 return -EBADMSG; /* runt */
1366 if (count > sizeof(string))
1367 return -EFBIG; /* too long */
1369 if (copy_from_user(string, buffer, count))
1370 return -EFAULT;
1371 string[sizeof(string)-1] = '\0';
1373 target = simple_strtol(string, NULL, 10);
1374 if (target < RX_MIN_TARGET)
1375 target = RX_MIN_TARGET;
1376 if (target > RX_MAX_TARGET)
1377 target = RX_MAX_TARGET;
1379 spin_lock(&np->rx_lock);
1381 switch (which_target)
1382 {
1383 case TARGET_MIN:
1384 if (target > np->rx_max_target)
1385 np->rx_max_target = target;
1386 np->rx_min_target = target;
1387 if (target > np->rx_target)
1388 np->rx_target = target;
1389 break;
1390 case TARGET_MAX:
1391 if (target < np->rx_min_target)
1392 np->rx_min_target = target;
1393 np->rx_max_target = target;
1394 if (target < np->rx_target)
1395 np->rx_target = target;
1396 break;
1397 case TARGET_CUR:
1398 break;
1399 }
1401 network_alloc_rx_buffers(dev);
1403 spin_unlock(&np->rx_lock);
1405 return count;
1406 }
1408 static int xennet_proc_init(void)
1409 {
1410 if (proc_mkdir("xen/net", NULL) == NULL)
1411 return -ENOMEM;
1412 return 0;
1413 }
1415 static int xennet_proc_addif(struct net_device *dev)
1416 {
1417 struct proc_dir_entry *dir, *min, *max, *cur;
1418 char name[30];
1420 sprintf(name, "xen/net/%s", dev->name);
1422 dir = proc_mkdir(name, NULL);
1423 if (!dir)
1424 goto nomem;
1426 min = create_proc_entry("rxbuf_min", 0644, dir);
1427 max = create_proc_entry("rxbuf_max", 0644, dir);
1428 cur = create_proc_entry("rxbuf_cur", 0444, dir);
1429 if (!min || !max || !cur)
1430 goto nomem;
1432 min->read_proc = xennet_proc_read;
1433 min->write_proc = xennet_proc_write;
1434 min->data = (void *)((unsigned long)dev | TARGET_MIN);
1436 max->read_proc = xennet_proc_read;
1437 max->write_proc = xennet_proc_write;
1438 max->data = (void *)((unsigned long)dev | TARGET_MAX);
1440 cur->read_proc = xennet_proc_read;
1441 cur->write_proc = xennet_proc_write;
1442 cur->data = (void *)((unsigned long)dev | TARGET_CUR);
1444 return 0;
1446 nomem:
1447 xennet_proc_delif(dev);
1448 return -ENOMEM;
1449 }
1451 static void xennet_proc_delif(struct net_device *dev)
1452 {
1453 char name[30];
1455 sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
1456 remove_proc_entry(name, NULL);
1458 sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
1459 remove_proc_entry(name, NULL);
1461 sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
1462 remove_proc_entry(name, NULL);
1464 sprintf(name, "xen/net/%s", dev->name);
1465 remove_proc_entry(name, NULL);
1466 }
1468 #endif
1470 module_init(netif_init);