ia64/xen-unstable

view linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c @ 5098:36cd2ccb0e4b

bitkeeper revision 1.1518 (42923748Z36fQ-pX8ugPi-8ioexnOg)

Add ethtool support to turn Tx checksum offloading on and off in
the netfront driver.

Signed-off-by: Jon Mason <jdmason@us.ibm.com>
author bren@br260.wolfson.cam.ac.uk
date Mon May 23 20:04:24 2005 +0000 (2005-05-23)
parents 8a1faeb0d3c6
children a247de7b1fb0
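
Editor's note: the ethtool hookup this changeset adds appears near the bottom of the file (network_ethtool_ops and the SET_ETHTOOL_OPS() call in create_netdev()). As a rough sketch of that pattern only — the helper functions and macro are the stock 2.6-era ethtool API the driver reuses, while the names example_ethtool_ops and example_attach are illustrative and not part of this file:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Sketch only: reuse the stock ethtool helpers so userspace
 * (e.g. "ethtool -K <iface> tx on|off") can toggle NETIF_F_IP_CSUM. */
static struct ethtool_ops example_ethtool_ops = {
    .get_tx_csum = ethtool_op_get_tx_csum, /* report whether Tx csum offload is enabled */
    .set_tx_csum = ethtool_op_set_tx_csum, /* set/clear NETIF_F_IP_CSUM accordingly */
};

static void example_attach(struct net_device *dev)
{
    dev->features |= NETIF_F_IP_CSUM;           /* offload IPv4 TCP/UDP checksums */
    SET_ETHTOOL_OPS(dev, &example_ethtool_ops); /* hook the ops into the device */
}

The real driver does the equivalent in create_netdev() below; from a running guest the offload would then typically be toggled with the ethtool utility.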
line source
1 /******************************************************************************
2 * Virtual network driver for conversing with remote driver backends.
3 *
4 * Copyright (c) 2002-2004, K A Fraser
5 *
6 * This file may be distributed separately from the Linux kernel, or
7 * incorporated into other software packages, subject to the following license:
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a copy
10 * of this source file (the "Software"), to deal in the Software without
11 * restriction, including without limitation the rights to use, copy, modify,
12 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
13 * and to permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice shall be included in
17 * all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
28 #include <linux/config.h>
29 #include <linux/module.h>
30 #include <linux/version.h>
31 #include <linux/kernel.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/string.h>
35 #include <linux/errno.h>
36 #include <linux/netdevice.h>
37 #include <linux/inetdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/skbuff.h>
40 #include <linux/init.h>
41 #include <linux/bitops.h>
42 #include <linux/proc_fs.h>
43 #include <linux/ethtool.h>
44 #include <net/sock.h>
45 #include <net/pkt_sched.h>
46 #include <net/arp.h>
47 #include <net/route.h>
48 #include <asm/io.h>
49 #include <asm/uaccess.h>
50 #include <asm-xen/evtchn.h>
51 #include <asm-xen/ctrl_if.h>
52 #include <asm-xen/xen-public/io/netif.h>
53 #include <asm-xen/balloon.h>
54 #include <asm/page.h>
55 #include <asm/uaccess.h>
57 #ifndef __GFP_NOWARN
58 #define __GFP_NOWARN 0
59 #endif
60 #define alloc_xen_skb(_l) __dev_alloc_skb((_l), GFP_ATOMIC|__GFP_NOWARN)
62 #define init_skb_shinfo(_skb) \
63 do { \
64 atomic_set(&(skb_shinfo(_skb)->dataref), 1); \
65 skb_shinfo(_skb)->nr_frags = 0; \
66 skb_shinfo(_skb)->frag_list = NULL; \
67 } while (0)
69 /* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
70 #define RX_HEADROOM 200
72 /*
73 * If the backend driver is pipelining transmit requests then we can be very
74 * aggressive in avoiding new-packet notifications -- only need to send a
75 * notification if there are no outstanding unreceived responses.
76 * If the backend may be buffering our transmit buffers for any reason then we
77 * are rather more conservative.
78 */
79 #ifdef CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
80 #define TX_TEST_IDX resp_prod /* aggressive: any outstanding responses? */
81 #else
82 #define TX_TEST_IDX req_cons /* conservative: not seen all our requests? */
83 #endif
85 static void network_tx_buf_gc(struct net_device *dev);
86 static void network_alloc_rx_buffers(struct net_device *dev);
88 static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
89 static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
90 static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
92 #ifdef CONFIG_PROC_FS
93 static int xennet_proc_init(void);
94 static int xennet_proc_addif(struct net_device *dev);
95 static void xennet_proc_delif(struct net_device *dev);
96 #else
97 #define xennet_proc_init() (0)
98 #define xennet_proc_addif(d) (0)
99 #define xennet_proc_delif(d) ((void)0)
100 #endif
102 static struct list_head dev_list;
104 struct net_private
105 {
106 struct list_head list;
107 struct net_device *dev;
109 struct net_device_stats stats;
110 NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
111 unsigned int tx_full;
113 netif_tx_interface_t *tx;
114 netif_rx_interface_t *rx;
116 spinlock_t tx_lock;
117 spinlock_t rx_lock;
119 unsigned int handle;
120 unsigned int evtchn;
121 unsigned int irq;
123 /* What is the status of our connection to the remote backend? */
124 #define BEST_CLOSED 0
125 #define BEST_DISCONNECTED 1
126 #define BEST_CONNECTED 2
127 unsigned int backend_state;
129 /* Is this interface open or closed (down or up)? */
130 #define UST_CLOSED 0
131 #define UST_OPEN 1
132 unsigned int user_state;
134 /* Receive-ring batched refills. */
135 #define RX_MIN_TARGET 8
136 #define RX_MAX_TARGET NETIF_RX_RING_SIZE
137 int rx_min_target, rx_max_target, rx_target;
138 struct sk_buff_head rx_batch;
140 /*
141 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
142 * array is an index into a chain of free entries.
143 */
144 struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
145 struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
146 };
148 /* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
149 #define ADD_ID_TO_FREELIST(_list, _id) \
150 (_list)[(_id)] = (_list)[0]; \
151 (_list)[0] = (void *)(unsigned long)(_id);
152 #define GET_ID_FROM_FREELIST(_list) \
153 ({ unsigned long _id = (unsigned long)(_list)[0]; \
154 (_list)[0] = (_list)[_id]; \
155 (unsigned short)_id; })
157 static char *status_name[] = {
158 [NETIF_INTERFACE_STATUS_CLOSED] = "closed",
159 [NETIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
160 [NETIF_INTERFACE_STATUS_CONNECTED] = "connected",
161 [NETIF_INTERFACE_STATUS_CHANGED] = "changed",
162 };
164 static char *be_state_name[] = {
165 [BEST_CLOSED] = "closed",
166 [BEST_DISCONNECTED] = "disconnected",
167 [BEST_CONNECTED] = "connected",
168 };
170 #if DEBUG
171 #define DPRINTK(fmt, args...) \
172 printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
173 #else
174 #define DPRINTK(fmt, args...) ((void)0)
175 #endif
176 #define IPRINTK(fmt, args...) \
177 printk(KERN_INFO "xen_net: " fmt, ##args)
178 #define WPRINTK(fmt, args...) \
179 printk(KERN_WARNING "xen_net: " fmt, ##args)
181 static struct net_device *find_dev_by_handle(unsigned int handle)
182 {
183 struct list_head *ent;
184 struct net_private *np;
185 list_for_each (ent, &dev_list) {
186 np = list_entry(ent, struct net_private, list);
187 if (np->handle == handle)
188 return np->dev;
189 }
190 return NULL;
191 }
193 /** Network interface info. */
194 struct netif_ctrl {
195 /** Number of interfaces. */
196 int interface_n;
197 /** Number of connected interfaces. */
198 int connected_n;
199 /** Error code. */
200 int err;
201 int up;
202 };
204 static struct netif_ctrl netctrl;
206 static void netctrl_init(void)
207 {
208 memset(&netctrl, 0, sizeof(netctrl));
209 netctrl.up = NETIF_DRIVER_STATUS_DOWN;
210 }
212 /** Get or set a network interface error.
213 */
214 static int netctrl_err(int err)
215 {
216 if ((err < 0) && !netctrl.err)
217 netctrl.err = err;
218 return netctrl.err;
219 }
221 /** Test if all network interfaces are connected.
222 *
223 * @return 1 if all connected, 0 if not, negative error code otherwise
224 */
225 static int netctrl_connected(void)
226 {
227 int ok;
229 if (netctrl.err)
230 ok = netctrl.err;
231 else if (netctrl.up == NETIF_DRIVER_STATUS_UP)
232 ok = (netctrl.connected_n == netctrl.interface_n);
233 else
234 ok = 0;
236 return ok;
237 }
239 /** Count the connected network interfaces.
240 *
241 * @return connected count
242 */
243 static int netctrl_connected_count(void)
244 {
246 struct list_head *ent;
247 struct net_private *np;
248 unsigned int connected;
250 connected = 0;
252 list_for_each(ent, &dev_list) {
253 np = list_entry(ent, struct net_private, list);
254 if (np->backend_state == BEST_CONNECTED)
255 connected++;
256 }
258 netctrl.connected_n = connected;
259 DPRINTK("> connected_n=%d interface_n=%d\n",
260 netctrl.connected_n, netctrl.interface_n);
261 return connected;
262 }
264 /** Send a packet on a net device to encourage switches to learn the
265 * MAC. We send a fake ARP request.
266 *
267 * @param dev device
268 * @return 0 on success, error code otherwise
269 */
270 static int send_fake_arp(struct net_device *dev)
271 {
272 struct sk_buff *skb;
273 u32 src_ip, dst_ip;
275 dst_ip = INADDR_BROADCAST;
276 src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
278 /* No IP? Then nothing to do. */
279 if (src_ip == 0)
280 return 0;
282 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
283 dst_ip, dev, src_ip,
284 /*dst_hw*/ NULL, /*src_hw*/ NULL,
285 /*target_hw*/ dev->dev_addr);
286 if (skb == NULL)
287 return -ENOMEM;
289 return dev_queue_xmit(skb);
290 }
292 static int network_open(struct net_device *dev)
293 {
294 struct net_private *np = netdev_priv(dev);
296 memset(&np->stats, 0, sizeof(np->stats));
298 np->user_state = UST_OPEN;
300 network_alloc_rx_buffers(dev);
301 np->rx->event = np->rx_resp_cons + 1;
303 netif_start_queue(dev);
305 return 0;
306 }
308 static void network_tx_buf_gc(struct net_device *dev)
309 {
310 NETIF_RING_IDX i, prod;
311 unsigned short id;
312 struct net_private *np = netdev_priv(dev);
313 struct sk_buff *skb;
315 if (np->backend_state != BEST_CONNECTED)
316 return;
318 do {
319 prod = np->tx->resp_prod;
320 rmb(); /* Ensure we see responses up to 'prod'. */
322 for (i = np->tx_resp_cons; i != prod; i++) {
323 id = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
324 skb = np->tx_skbs[id];
325 ADD_ID_TO_FREELIST(np->tx_skbs, id);
326 dev_kfree_skb_irq(skb);
327 }
329 np->tx_resp_cons = prod;
331 /*
332 * Set a new event, then check for race with update of tx_cons. Note
333 * that it is essential to schedule a callback, no matter how few
334 * buffers are pending. Even if there is space in the transmit ring,
335 * higher layers may be blocked because too much data is outstanding:
336 * in such cases notification from Xen is likely to be the only kick
337 * that we'll get.
338 */
339 np->tx->event =
340 prod + ((np->tx->req_prod - prod) >> 1) + 1;
341 mb();
342 } while (prod != np->tx->resp_prod);
344 if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
345 np->tx_full = 0;
346 if (np->user_state == UST_OPEN)
347 netif_wake_queue(dev);
348 }
349 }
352 static void network_alloc_rx_buffers(struct net_device *dev)
353 {
354 unsigned short id;
355 struct net_private *np = netdev_priv(dev);
356 struct sk_buff *skb;
357 int i, batch_target;
358 NETIF_RING_IDX req_prod = np->rx->req_prod;
360 if (unlikely(np->backend_state != BEST_CONNECTED))
361 return;
363 /*
364 * Allocate skbuffs greedily, even though we batch updates to the
365 * receive ring. This creates a less bursty demand on the memory allocator,
366 * so should reduce the chance of failed allocation requests both for
367 * ourself and for other kernel subsystems.
368 */
369 batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
370 for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
371 if (unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL))
372 break;
373 __skb_queue_tail(&np->rx_batch, skb);
374 }
376 /* Is the batch large enough to be worthwhile? */
377 if (i < (np->rx_target/2))
378 return;
380 for (i = 0; ; i++) {
381 if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
382 break;
384 skb->dev = dev;
386 id = GET_ID_FROM_FREELIST(np->rx_skbs);
388 np->rx_skbs[id] = skb;
390 np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
392 rx_pfn_array[i] = virt_to_machine(skb->head) >> PAGE_SHIFT;
394 /* Remove this page from pseudo phys map before passing back to Xen. */
395 phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT]
396 = INVALID_P2M_ENTRY;
398 rx_mcl[i].op = __HYPERVISOR_update_va_mapping;
399 rx_mcl[i].args[0] = (unsigned long)skb->head;
400 rx_mcl[i].args[1] = 0;
401 rx_mcl[i].args[2] = 0;
402 }
404 /* After all PTEs have been zapped we blow away stale TLB entries. */
405 rx_mcl[i-1].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
407 /* Give away a batch of pages. */
408 rx_mcl[i].op = __HYPERVISOR_dom_mem_op;
409 rx_mcl[i].args[0] = MEMOP_decrease_reservation;
410 rx_mcl[i].args[1] = (unsigned long)rx_pfn_array;
411 rx_mcl[i].args[2] = (unsigned long)i;
412 rx_mcl[i].args[3] = 0;
413 rx_mcl[i].args[4] = DOMID_SELF;
415 /* Tell the balloon driver what is going on. */
416 balloon_update_driver_allowance(i);
418 /* Zap PTEs and give away pages in one big multicall. */
419 (void)HYPERVISOR_multicall(rx_mcl, i+1);
421 /* Check return status of HYPERVISOR_dom_mem_op(). */
422 if (unlikely(rx_mcl[i].args[5] != i))
423 panic("Unable to reduce memory reservation\n");
425 /* Above is a suitable barrier to ensure backend will see requests. */
426 np->rx->req_prod = req_prod + i;
428 /* Adjust our floating fill target if we risked running out of buffers. */
429 if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
430 ((np->rx_target *= 2) > np->rx_max_target))
431 np->rx_target = np->rx_max_target;
432 }
435 static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
436 {
437 unsigned short id;
438 struct net_private *np = netdev_priv(dev);
439 netif_tx_request_t *tx;
440 NETIF_RING_IDX i;
442 if (unlikely(np->tx_full)) {
443 printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
444 netif_stop_queue(dev);
445 goto drop;
446 }
448 if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
449 PAGE_SIZE)) {
450 struct sk_buff *nskb;
451 if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
452 goto drop;
453 skb_put(nskb, skb->len);
454 memcpy(nskb->data, skb->data, skb->len);
455 nskb->dev = skb->dev;
456 dev_kfree_skb(skb);
457 skb = nskb;
458 }
460 spin_lock_irq(&np->tx_lock);
462 if (np->backend_state != BEST_CONNECTED) {
463 spin_unlock_irq(&np->tx_lock);
464 goto drop;
465 }
467 i = np->tx->req_prod;
469 id = GET_ID_FROM_FREELIST(np->tx_skbs);
470 np->tx_skbs[id] = skb;
472 tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
474 tx->id = id;
475 tx->addr = virt_to_machine(skb->data);
476 tx->size = skb->len;
477 tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
479 wmb(); /* Ensure that backend will see the request. */
480 np->tx->req_prod = i + 1;
482 network_tx_buf_gc(dev);
484 if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
485 np->tx_full = 1;
486 netif_stop_queue(dev);
487 }
489 spin_unlock_irq(&np->tx_lock);
491 np->stats.tx_bytes += skb->len;
492 np->stats.tx_packets++;
494 /* Only notify Xen if we really have to. */
495 mb();
496 if (np->tx->TX_TEST_IDX == i)
497 notify_via_evtchn(np->evtchn);
499 return 0;
501 drop:
502 np->stats.tx_dropped++;
503 dev_kfree_skb(skb);
504 return 0;
505 }
507 static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
508 {
509 struct net_device *dev = dev_id;
510 struct net_private *np = netdev_priv(dev);
511 unsigned long flags;
513 spin_lock_irqsave(&np->tx_lock, flags);
514 network_tx_buf_gc(dev);
515 spin_unlock_irqrestore(&np->tx_lock, flags);
517 if ((np->rx_resp_cons != np->rx->resp_prod) && (np->user_state == UST_OPEN))
518 netif_rx_schedule(dev);
520 return IRQ_HANDLED;
521 }
524 static int netif_poll(struct net_device *dev, int *pbudget)
525 {
526 struct net_private *np = netdev_priv(dev);
527 struct sk_buff *skb, *nskb;
528 netif_rx_response_t *rx;
529 NETIF_RING_IDX i, rp;
530 mmu_update_t *mmu = rx_mmu;
531 multicall_entry_t *mcl = rx_mcl;
532 int work_done, budget, more_to_do = 1;
533 struct sk_buff_head rxq;
534 unsigned long flags;
536 spin_lock(&np->rx_lock);
538 if (np->backend_state != BEST_CONNECTED) {
539 spin_unlock(&np->rx_lock);
540 return 0;
541 }
543 skb_queue_head_init(&rxq);
545 if ((budget = *pbudget) > dev->quota)
546 budget = dev->quota;
548 rp = np->rx->resp_prod;
549 rmb(); /* Ensure we see queued responses up to 'rp'. */
551 for (i = np->rx_resp_cons, work_done = 0;
552 (i != rp) && (work_done < budget);
553 i++, work_done++) {
554 rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
556 /*
557 * An error here is very odd. Usually indicates a backend bug,
558 * low-memory condition, or that we didn't have reservation headroom.
559 */
560 if (unlikely(rx->status <= 0)) {
561 if (net_ratelimit())
562 printk(KERN_WARNING "Bad rx buffer (memory squeeze?).\n");
563 np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
564 wmb();
565 np->rx->req_prod++;
566 work_done--;
567 continue;
568 }
570 skb = np->rx_skbs[rx->id];
571 ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
573 /* NB. We handle skb overflow later. */
574 skb->data = skb->head + (rx->addr & ~PAGE_MASK);
575 skb->len = rx->status;
576 skb->tail = skb->data + skb->len;
578 if ( rx->csum_valid )
579 skb->ip_summed = CHECKSUM_UNNECESSARY;
581 np->stats.rx_packets++;
582 np->stats.rx_bytes += rx->status;
584 /* Remap the page. */
585 mmu->ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
586 mmu->val = __pa(skb->head) >> PAGE_SHIFT;
587 mmu++;
588 mcl->op = __HYPERVISOR_update_va_mapping;
589 mcl->args[0] = (unsigned long)skb->head;
590 mcl->args[1] = (rx->addr & PAGE_MASK) | __PAGE_KERNEL;
591 mcl->args[2] = 0;
592 mcl++;
594 phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] =
595 rx->addr >> PAGE_SHIFT;
597 __skb_queue_tail(&rxq, skb);
598 }
600 /* Some pages are no longer absent... */
601 balloon_update_driver_allowance(-work_done);
603 /* Do all the remapping work, and M->P updates, in one big hypercall. */
604 if (likely((mcl - rx_mcl) != 0)) {
605 mcl->op = __HYPERVISOR_mmu_update;
606 mcl->args[0] = (unsigned long)rx_mmu;
607 mcl->args[1] = mmu - rx_mmu;
608 mcl->args[2] = 0;
609 mcl->args[3] = DOMID_SELF;
610 mcl++;
611 (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
612 }
614 while ((skb = __skb_dequeue(&rxq)) != NULL) {
615 /*
616 * Enough room in skbuff for the data we were passed? Also, Linux
617 * expects at least 16 bytes headroom in each receive buffer.
618 */
619 if (unlikely(skb->tail > skb->end) ||
620 unlikely((skb->data - skb->head) < 16)) {
621 nskb = NULL;
623 /* Only copy the packet if it fits in the current MTU. */
624 if (skb->len <= (dev->mtu + ETH_HLEN)) {
625 if ((skb->tail > skb->end) && net_ratelimit())
626 printk(KERN_INFO "Received packet needs %d bytes more "
627 "headroom.\n", skb->tail - skb->end);
629 if ((nskb = alloc_xen_skb(skb->len + 2)) != NULL) {
630 skb_reserve(nskb, 2);
631 skb_put(nskb, skb->len);
632 memcpy(nskb->data, skb->data, skb->len);
633 nskb->dev = skb->dev;
634 }
635 }
636 else if (net_ratelimit())
637 printk(KERN_INFO "Received packet too big for MTU "
638 "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu);
640 /* Reinitialise and then destroy the old skbuff. */
641 skb->len = 0;
642 skb->tail = skb->data;
643 init_skb_shinfo(skb);
644 dev_kfree_skb(skb);
646 /* Switch old for new, if we copied the buffer. */
647 if ((skb = nskb) == NULL)
648 continue;
649 }
651 /* Set the shared-info area, which is hidden behind the real data. */
652 init_skb_shinfo(skb);
654 /* Ethernet-specific work. Delayed to here as it peeks the header. */
655 skb->protocol = eth_type_trans(skb, dev);
657 /* Pass it up. */
658 netif_receive_skb(skb);
659 dev->last_rx = jiffies;
660 }
662 np->rx_resp_cons = i;
664 /* If we get a callback with very few responses, reduce fill target. */
665 /* NB. Note exponential increase, linear decrease. */
666 if (((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
667 (--np->rx_target < np->rx_min_target))
668 np->rx_target = np->rx_min_target;
670 network_alloc_rx_buffers(dev);
672 *pbudget -= work_done;
673 dev->quota -= work_done;
675 if (work_done < budget) {
676 local_irq_save(flags);
678 np->rx->event = i + 1;
680 /* Deal with hypervisor racing our resetting of rx_event. */
681 mb();
682 if (np->rx->resp_prod == i) {
683 __netif_rx_complete(dev);
684 more_to_do = 0;
685 }
687 local_irq_restore(flags);
688 }
690 spin_unlock(&np->rx_lock);
692 return more_to_do;
693 }
696 static int network_close(struct net_device *dev)
697 {
698 struct net_private *np = netdev_priv(dev);
699 np->user_state = UST_CLOSED;
700 netif_stop_queue(np->dev);
701 return 0;
702 }
705 static struct net_device_stats *network_get_stats(struct net_device *dev)
706 {
707 struct net_private *np = netdev_priv(dev);
708 return &np->stats;
709 }
712 static void network_connect(struct net_device *dev,
713 netif_fe_interface_status_t *status)
714 {
715 struct net_private *np;
716 int i, requeue_idx;
717 netif_tx_request_t *tx;
719 np = netdev_priv(dev);
720 spin_lock_irq(&np->tx_lock);
721 spin_lock(&np->rx_lock);
723 /* Recovery procedure: */
725 /* Step 1: Reinitialise variables. */
726 np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
727 np->rx->event = np->tx->event = 1;
729 /* Step 2: Rebuild the RX and TX ring contents.
730 * NB. We could just free the queued TX packets now but we hope
731 * that sending them out might do some good. We have to rebuild
732 * the RX ring because some of our pages are currently flipped out
733 * so we can't just free the RX skbs.
734 * NB2. Freelist index entries are always going to be less than
735 * __PAGE_OFFSET, whereas pointers to skbs will always be equal or
736 * greater than __PAGE_OFFSET: we use this property to distinguish
737 * them.
738 */
740 /* Rebuild the TX buffer freelist and the TX ring itself.
741 * NB. This reorders packets. We could keep more private state
742 * to avoid this but maybe it doesn't matter so much given the
743 * interface has been down.
744 */
745 for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
746 if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
747 struct sk_buff *skb = np->tx_skbs[i];
749 tx = &np->tx->ring[requeue_idx++].req;
751 tx->id = i;
752 tx->addr = virt_to_machine(skb->data);
753 tx->size = skb->len;
755 np->stats.tx_bytes += skb->len;
756 np->stats.tx_packets++;
757 }
758 }
759 wmb();
760 np->tx->req_prod = requeue_idx;
762 /* Rebuild the RX buffer freelist and the RX ring itself. */
763 for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++)
764 if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET)
765 np->rx->ring[requeue_idx++].req.id = i;
766 wmb();
767 np->rx->req_prod = requeue_idx;
769 /* Step 3: All public and private state should now be sane. Get
770 * ready to start sending and receiving packets and give the driver
771 * domain a kick because we've probably just requeued some
772 * packets.
773 */
774 np->backend_state = BEST_CONNECTED;
775 wmb();
776 notify_via_evtchn(status->evtchn);
777 network_tx_buf_gc(dev);
779 if (np->user_state == UST_OPEN)
780 netif_start_queue(dev);
782 spin_unlock(&np->rx_lock);
783 spin_unlock_irq(&np->tx_lock);
784 }
786 static void vif_show(struct net_private *np)
787 {
788 #if DEBUG
789 if (np) {
790 IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n",
791 np->handle,
792 be_state_name[np->backend_state],
793 np->user_state ? "open" : "closed",
794 np->evtchn,
795 np->irq,
796 np->tx,
797 np->rx);
798 } else {
799 IPRINTK("<vif NULL>\n");
800 }
801 #endif
802 }
804 /* Send a connect message to xend to tell it to bring up the interface. */
805 static void send_interface_connect(struct net_private *np)
806 {
807 ctrl_msg_t cmsg = {
808 .type = CMSG_NETIF_FE,
809 .subtype = CMSG_NETIF_FE_INTERFACE_CONNECT,
810 .length = sizeof(netif_fe_interface_connect_t),
811 };
812 netif_fe_interface_connect_t *msg = (void*)cmsg.msg;
814 msg->handle = np->handle;
815 msg->tx_shmem_frame = (virt_to_machine(np->tx) >> PAGE_SHIFT);
816 msg->rx_shmem_frame = (virt_to_machine(np->rx) >> PAGE_SHIFT);
818 ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
819 }
821 /* Send a driver status notification to the domain controller. */
822 static int send_driver_status(int ok)
823 {
824 int err = 0;
825 ctrl_msg_t cmsg = {
826 .type = CMSG_NETIF_FE,
827 .subtype = CMSG_NETIF_FE_DRIVER_STATUS,
828 .length = sizeof(netif_fe_driver_status_t),
829 };
830 netif_fe_driver_status_t *msg = (void*)cmsg.msg;
832 msg->status = (ok ? NETIF_DRIVER_STATUS_UP : NETIF_DRIVER_STATUS_DOWN);
833 err = ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
834 return err;
835 }
837 /* Stop network device and free tx/rx queues and irq.
838 */
839 static void vif_release(struct net_private *np)
840 {
841 /* Stop old i/f to prevent errors whilst we rebuild the state. */
842 spin_lock_irq(&np->tx_lock);
843 spin_lock(&np->rx_lock);
844 netif_stop_queue(np->dev);
845 /* np->backend_state = BEST_DISCONNECTED; */
846 spin_unlock(&np->rx_lock);
847 spin_unlock_irq(&np->tx_lock);
849 /* Free resources. */
850 if(np->tx != NULL){
851 free_irq(np->irq, np->dev);
852 unbind_evtchn_from_irq(np->evtchn);
853 free_page((unsigned long)np->tx);
854 free_page((unsigned long)np->rx);
855 np->irq = 0;
856 np->evtchn = 0;
857 np->tx = NULL;
858 np->rx = NULL;
859 }
860 }
862 /* Release vif resources and close it down completely.
863 */
864 static void vif_close(struct net_private *np)
865 {
866 WPRINTK("Unexpected netif-CLOSED message in state %s\n",
867 be_state_name[np->backend_state]);
868 vif_release(np);
869 np->backend_state = BEST_CLOSED;
870 /* todo: take dev down and free. */
871 vif_show(np);
872 }
874 /* Move the vif into disconnected state.
875 * Allocates tx/rx pages.
876 * Sends connect message to xend.
877 */
878 static void vif_disconnect(struct net_private *np)
879 {
880 if(np->tx) free_page((unsigned long)np->tx);
881 if(np->rx) free_page((unsigned long)np->rx);
882 // Before this np->tx and np->rx had better be null.
883 np->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
884 np->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
885 memset(np->tx, 0, PAGE_SIZE);
886 memset(np->rx, 0, PAGE_SIZE);
887 np->backend_state = BEST_DISCONNECTED;
888 send_interface_connect(np);
889 vif_show(np);
890 }
892 /* Begin interface recovery.
893 *
894 * NB. Whilst we're recovering, we turn the carrier state off. We
895 * take measures to ensure that this device isn't used for
896 * anything. We also stop the queue for this device. Various
897 * different approaches (e.g. continuing to buffer packets) have
898 * been tested but don't appear to improve the overall impact on
899 * TCP connections.
900 *
901 * TODO: (MAW) Change the Xend<->Guest protocol so that a recovery
902 * is initiated by a special "RESET" message - disconnect could
903 * just mean we're not allowed to use this interface any more.
904 */
905 static void vif_reset(struct net_private *np)
906 {
907 IPRINTK("Attempting to reconnect network interface: handle=%u\n",
908 np->handle);
909 vif_release(np);
910 vif_disconnect(np);
911 vif_show(np);
912 }
914 /* Move the vif into connected state.
915 * Sets the mac and event channel from the message.
916 * Binds the irq to the event channel.
917 */
918 static void
919 vif_connect(struct net_private *np, netif_fe_interface_status_t *status)
920 {
921 struct net_device *dev = np->dev;
922 memcpy(dev->dev_addr, status->mac, ETH_ALEN);
923 network_connect(dev, status);
924 np->evtchn = status->evtchn;
925 np->irq = bind_evtchn_to_irq(np->evtchn);
926 (void)request_irq(np->irq, netif_int, SA_SAMPLE_RANDOM, dev->name, dev);
927 netctrl_connected_count();
928 (void)send_fake_arp(dev);
929 vif_show(np);
930 }
932 static struct ethtool_ops network_ethtool_ops =
933 {
934 .get_tx_csum = ethtool_op_get_tx_csum,
935 .set_tx_csum = ethtool_op_set_tx_csum,
936 };
938 /** Create a network device.
939 * @param handle device handle
940 * @param val return parameter for created device
941 * @return 0 on success, error code otherwise
942 */
943 static int create_netdev(int handle, struct net_device **val)
944 {
945 int i, err = 0;
946 struct net_device *dev = NULL;
947 struct net_private *np = NULL;
949 if ((dev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
950 printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
951 err = -ENOMEM;
952 goto exit;
953 }
955 np = netdev_priv(dev);
956 np->backend_state = BEST_CLOSED;
957 np->user_state = UST_CLOSED;
958 np->handle = handle;
960 spin_lock_init(&np->tx_lock);
961 spin_lock_init(&np->rx_lock);
963 skb_queue_head_init(&np->rx_batch);
964 np->rx_target = RX_MIN_TARGET;
965 np->rx_min_target = RX_MIN_TARGET;
966 np->rx_max_target = RX_MAX_TARGET;
968 /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
969 for (i = 0; i <= NETIF_TX_RING_SIZE; i++)
970 np->tx_skbs[i] = (void *)(i+1);
971 for (i = 0; i <= NETIF_RX_RING_SIZE; i++)
972 np->rx_skbs[i] = (void *)(i+1);
974 dev->open = network_open;
975 dev->hard_start_xmit = network_start_xmit;
976 dev->stop = network_close;
977 dev->get_stats = network_get_stats;
978 dev->poll = netif_poll;
979 dev->weight = 64;
980 dev->features = NETIF_F_IP_CSUM;
982 SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
984 if ((err = register_netdev(dev)) != 0) {
985 printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
986 goto exit;
987 }
989 if ((err = xennet_proc_addif(dev)) != 0) {
990 unregister_netdev(dev);
991 goto exit;
992 }
994 np->dev = dev;
995 list_add(&np->list, &dev_list);
997 exit:
998 if ((err != 0) && (dev != NULL))
999 kfree(dev);
1000 else if (val != NULL)
1001 *val = dev;
1002 return err;
1003 }
1005 /* Get the target interface for a status message.
1006 * Creates the interface when it makes sense.
1007 * The returned interface may be null when there is no error.
1009 * @param status status message
1010 * @param np return parameter for interface state
1011 * @return 0 on success, error code otherwise
1012 */
1013 static int
1014 target_vif(netif_fe_interface_status_t *status, struct net_private **np)
1015 {
1016 int err = 0;
1017 struct net_device *dev;
1019 DPRINTK("> handle=%d\n", status->handle);
1020 if (status->handle < 0) {
1021 err = -EINVAL;
1022 goto exit;
1023 }
1025 if ((dev = find_dev_by_handle(status->handle)) != NULL)
1026 goto exit;
1028 if (status->status == NETIF_INTERFACE_STATUS_CLOSED)
1029 goto exit;
1030 if (status->status == NETIF_INTERFACE_STATUS_CHANGED)
1031 goto exit;
1033 /* It's a new interface in a good state - create it. */
1034 DPRINTK("> create device...\n");
1035 if ((err = create_netdev(status->handle, &dev)) != 0)
1036 goto exit;
1038 netctrl.interface_n++;
1040 exit:
1041 if (np != NULL)
1042 *np = ((dev && !err) ? netdev_priv(dev) : NULL);
1043 DPRINTK("< err=%d\n", err);
1044 return err;
1045 }
1047 /* Handle an interface status message. */
1048 static void netif_interface_status(netif_fe_interface_status_t *status)
1049 {
1050 int err = 0;
1051 struct net_private *np = NULL;
1053 DPRINTK("> status=%s handle=%d\n",
1054 status_name[status->status], status->handle);
1056 if ((err = target_vif(status, &np)) != 0) {
1057 WPRINTK("Invalid netif: handle=%u\n", status->handle);
1058 return;
1059 }
1061 if (np == NULL) {
1062 DPRINTK("> no vif\n");
1063 return;
1064 }
1066 switch (status->status) {
1067 case NETIF_INTERFACE_STATUS_CLOSED:
1068 switch (np->backend_state) {
1069 case BEST_CLOSED:
1070 case BEST_DISCONNECTED:
1071 case BEST_CONNECTED:
1072 vif_close(np);
1073 break;
1074 }
1075 break;
1077 case NETIF_INTERFACE_STATUS_DISCONNECTED:
1078 switch (np->backend_state) {
1079 case BEST_CLOSED:
1080 vif_disconnect(np);
1081 break;
1082 case BEST_DISCONNECTED:
1083 case BEST_CONNECTED:
1084 vif_reset(np);
1085 break;
1086 }
1087 break;
1089 case NETIF_INTERFACE_STATUS_CONNECTED:
1090 switch (np->backend_state) {
1091 case BEST_CLOSED:
1092 WPRINTK("Unexpected netif status %s in state %s\n",
1093 status_name[status->status],
1094 be_state_name[np->backend_state]);
1095 vif_disconnect(np);
1096 vif_connect(np, status);
1097 break;
1098 case BEST_DISCONNECTED:
1099 vif_connect(np, status);
1100 break;
1101 }
1102 break;
1104 case NETIF_INTERFACE_STATUS_CHANGED:
1105 /*
1106 * The domain controller is notifying us that a device has been
1107 * added or removed.
1108 */
1109 break;
1111 default:
1112 WPRINTK("Invalid netif status code %d\n", status->status);
1113 break;
1114 }
1116 vif_show(np);
1117 }
1119 /*
1120 * Initialize the network control interface.
1121 */
1122 static void netif_driver_status(netif_fe_driver_status_t *status)
1123 {
1124 netctrl.up = status->status;
1125 netctrl_connected_count();
1126 }
1128 /* Receive handler for control messages. */
1129 static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
1130 {
1132 switch (msg->subtype) {
1133 case CMSG_NETIF_FE_INTERFACE_STATUS:
1134 netif_interface_status((netif_fe_interface_status_t *) &msg->msg[0]);
1135 break;
1137 case CMSG_NETIF_FE_DRIVER_STATUS:
1138 netif_driver_status((netif_fe_driver_status_t *) &msg->msg[0]);
1139 break;
1141 default:
1142 msg->length = 0;
1143 break;
1144 }
1146 ctrl_if_send_response(msg);
1147 }
1150 #if 1
1151 /* Wait for all interfaces to be connected.
1153 * This works OK, but we'd like to use the probing mode (see below).
1154 */
1155 static int probe_interfaces(void)
1156 {
1157 int err = 0, conn = 0;
1158 int wait_i, wait_n = 100;
1160 DPRINTK(">\n");
1162 for (wait_i = 0; wait_i < wait_n; wait_i++) {
1163 DPRINTK("> wait_i=%d\n", wait_i);
1164 conn = netctrl_connected();
1165 if(conn) break;
1166 DPRINTK("> schedule_timeout...\n");
1167 set_current_state(TASK_INTERRUPTIBLE);
1168 schedule_timeout(10);
1169 }
1171 DPRINTK("> wait finished...\n");
1172 if (conn <= 0) {
1173 err = netctrl_err(-ENETDOWN);
1174 WPRINTK("Failed to connect all virtual interfaces: err=%d\n", err);
1175 }
1177 DPRINTK("< err=%d\n", err);
1179 return err;
1180 }
1181 #else
1182 /* Probe for interfaces until no more are found.
1184 * This is the mode we'd like to use, but at the moment it panics the kernel.
1185 */
1186 static int probe_interfaces(void)
1187 {
1188 int err = 0;
1189 int wait_i, wait_n = 100;
1190 ctrl_msg_t cmsg = {
1191 .type = CMSG_NETIF_FE,
1192 .subtype = CMSG_NETIF_FE_INTERFACE_STATUS,
1193 .length = sizeof(netif_fe_interface_status_t),
1194 };
1195 netif_fe_interface_status_t msg = {};
1196 ctrl_msg_t rmsg = {};
1197 netif_fe_interface_status_t *reply = (void*)rmsg.msg;
1198 int state = TASK_UNINTERRUPTIBLE;
1199 u32 query = -1;
1201 DPRINTK(">\n");
1203 netctrl.interface_n = 0;
1204 for (wait_i = 0; wait_i < wait_n; wait_i++) {
1205 DPRINTK("> wait_i=%d query=%d\n", wait_i, query);
1206 msg.handle = query;
1207 memcpy(cmsg.msg, &msg, sizeof(msg));
1208 DPRINTK("> set_current_state...\n");
1209 set_current_state(state);
1210 DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
1211 DPRINTK("> sending...\n");
1212 err = ctrl_if_send_message_and_get_response(&cmsg, &rmsg, state);
1213 DPRINTK("> err=%d\n", err);
1214 if(err) goto exit;
1215 DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
1216 if((int)reply->handle < 0) {
1217 // No more interfaces.
1218 break;
1219 }
1220 query = -reply->handle - 2;
1221 DPRINTK(">netif_interface_status ...\n");
1222 netif_interface_status(reply);
1223 }
1225 exit:
1226 if (err) {
1227 err = netctrl_err(-ENETDOWN);
1228 WPRINTK("Connecting virtual network interfaces failed: err=%d\n", err);
1229 }
1231 DPRINTK("< err=%d\n", err);
1232 return err;
1233 }
1235 #endif
1237 /*
1238 * We use this notifier to send out a fake ARP reply to reset switches and
1239 * router ARP caches when an IP interface is brought up on a VIF.
1240 */
1241 static int
1242 inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
1243 {
1244 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
1245 struct net_device *dev = ifa->ifa_dev->dev;
1246 struct list_head *ent;
1247 struct net_private *np;
1249 if (event != NETDEV_UP)
1250 goto out;
1252 list_for_each (ent, &dev_list) {
1253 np = list_entry(ent, struct net_private, list);
1254 if (np->dev == dev)
1255 (void)send_fake_arp(dev);
1256 }
1258 out:
1259 return NOTIFY_DONE;
1260 }
1262 static struct notifier_block notifier_inetdev = {
1263 .notifier_call = inetdev_notify,
1264 .next = NULL,
1265 .priority = 0
1266 };
1268 static int __init netif_init(void)
1269 {
1270 int err = 0;
1272 if (xen_start_info.flags & SIF_INITDOMAIN)
1273 return 0;
1275 if ((err = xennet_proc_init()) != 0)
1276 return err;
1278 IPRINTK("Initialising virtual ethernet driver.\n");
1279 INIT_LIST_HEAD(&dev_list);
1280 (void)register_inetaddr_notifier(&notifier_inetdev);
1281 netctrl_init();
1282 (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx,
1283 CALLBACK_IN_BLOCKING_CONTEXT);
1284 send_driver_status(1);
1285 err = probe_interfaces();
1286 if (err)
1287 ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);
1289 DPRINTK("< err=%d\n", err);
1290 return err;
1291 }
1293 static void vif_suspend(struct net_private *np)
1294 {
1295 /* Avoid having tx/rx stuff happen until we're ready. */
1296 free_irq(np->irq, np->dev);
1297 unbind_evtchn_from_irq(np->evtchn);
1298 }
1300 static void vif_resume(struct net_private *np)
1301 {
1302 /*
1303 * Connect regardless of whether IFF_UP flag set.
1304 * Stop bad things from happening until we're back up.
1305 */
1306 np->backend_state = BEST_DISCONNECTED;
1307 memset(np->tx, 0, PAGE_SIZE);
1308 memset(np->rx, 0, PAGE_SIZE);
1310 send_interface_connect(np);
1311 }
1313 void netif_suspend(void)
1314 {
1315 struct list_head *ent;
1316 struct net_private *np;
1318 list_for_each (ent, &dev_list) {
1319 np = list_entry(ent, struct net_private, list);
1320 vif_suspend(np);
1321 }
1322 }
1324 void netif_resume(void)
1325 {
1326 struct list_head *ent;
1327 struct net_private *np;
1329 list_for_each (ent, &dev_list) {
1330 np = list_entry(ent, struct net_private, list);
1331 vif_resume(np);
1332 }
1333 }
1335 #ifdef CONFIG_PROC_FS
1337 #define TARGET_MIN 0UL
1338 #define TARGET_MAX 1UL
1339 #define TARGET_CUR 2UL
1341 static int xennet_proc_read(
1342 char *page, char **start, off_t off, int count, int *eof, void *data)
1343 {
1344 struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
1345 struct net_private *np = netdev_priv(dev);
1346 int len = 0, which_target = (int)data & 3;
1348 switch (which_target)
1349 {
1350 case TARGET_MIN:
1351 len = sprintf(page, "%d\n", np->rx_min_target);
1352 break;
1353 case TARGET_MAX:
1354 len = sprintf(page, "%d\n", np->rx_max_target);
1355 break;
1356 case TARGET_CUR:
1357 len = sprintf(page, "%d\n", np->rx_target);
1358 break;
1359 }
1361 *eof = 1;
1362 return len;
1363 }
1365 static int xennet_proc_write(
1366 struct file *file, const char __user *buffer,
1367 unsigned long count, void *data)
1368 {
1369 struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
1370 struct net_private *np = netdev_priv(dev);
1371 int which_target = (int)data & 3;
1372 char string[64];
1373 long target;
1375 if (!capable(CAP_SYS_ADMIN))
1376 return -EPERM;
1378 if (count <= 1)
1379 return -EBADMSG; /* runt */
1380 if (count > sizeof(string))
1381 return -EFBIG; /* too long */
1383 if (copy_from_user(string, buffer, count))
1384 return -EFAULT;
1385 string[sizeof(string)-1] = '\0';
1387 target = simple_strtol(string, NULL, 10);
1388 if (target < RX_MIN_TARGET)
1389 target = RX_MIN_TARGET;
1390 if (target > RX_MAX_TARGET)
1391 target = RX_MAX_TARGET;
1393 spin_lock(&np->rx_lock);
1395 switch (which_target)
1396 {
1397 case TARGET_MIN:
1398 if (target > np->rx_max_target)
1399 np->rx_max_target = target;
1400 np->rx_min_target = target;
1401 if (target > np->rx_target)
1402 np->rx_target = target;
1403 break;
1404 case TARGET_MAX:
1405 if (target < np->rx_min_target)
1406 np->rx_min_target = target;
1407 np->rx_max_target = target;
1408 if (target < np->rx_target)
1409 np->rx_target = target;
1410 break;
1411 case TARGET_CUR:
1412 break;
1413 }
1415 network_alloc_rx_buffers(dev);
1417 spin_unlock(&np->rx_lock);
1419 return count;
1420 }
1422 static int xennet_proc_init(void)
1423 {
1424 if (proc_mkdir("xen/net", NULL) == NULL)
1425 return -ENOMEM;
1426 return 0;
1427 }
1429 static int xennet_proc_addif(struct net_device *dev)
1430 {
1431 struct proc_dir_entry *dir, *min, *max, *cur;
1432 char name[30];
1434 sprintf(name, "xen/net/%s", dev->name);
1436 dir = proc_mkdir(name, NULL);
1437 if (!dir)
1438 goto nomem;
1440 min = create_proc_entry("rxbuf_min", 0644, dir);
1441 max = create_proc_entry("rxbuf_max", 0644, dir);
1442 cur = create_proc_entry("rxbuf_cur", 0444, dir);
1443 if (!min || !max || !cur)
1444 goto nomem;
1446 min->read_proc = xennet_proc_read;
1447 min->write_proc = xennet_proc_write;
1448 min->data = (void *)((unsigned long)dev | TARGET_MIN);
1450 max->read_proc = xennet_proc_read;
1451 max->write_proc = xennet_proc_write;
1452 max->data = (void *)((unsigned long)dev | TARGET_MAX);
1454 cur->read_proc = xennet_proc_read;
1455 cur->write_proc = xennet_proc_write;
1456 cur->data = (void *)((unsigned long)dev | TARGET_CUR);
1458 return 0;
1460 nomem:
1461 xennet_proc_delif(dev);
1462 return -ENOMEM;
1463 }
1465 static void xennet_proc_delif(struct net_device *dev)
1466 {
1467 char name[30];
1469 sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
1470 remove_proc_entry(name, NULL);
1472 sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
1473 remove_proc_entry(name, NULL);
1475 sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
1476 remove_proc_entry(name, NULL);
1478 sprintf(name, "xen/net/%s", dev->name);
1479 remove_proc_entry(name, NULL);
1480 }
1482 #endif
1484 module_init(netif_init);