ia64/xen-unstable

linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c @ 7119:1a82995a017c

Fix netif save/restore.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Sep 28 17:51:24 2005 +0100 (2005-09-28)
parents 9ff1bea68d51
children c7f58e86446f
line source
1 /******************************************************************************
2 * Virtual network driver for conversing with remote driver backends.
3 *
4 * Copyright (c) 2002-2004, K A Fraser
5 *
6 * This file may be distributed separately from the Linux kernel, or
7 * incorporated into other software packages, subject to the following license:
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a copy
10 * of this source file (the "Software"), to deal in the Software without
11 * restriction, including without limitation the rights to use, copy, modify,
12 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
13 * and to permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice shall be included in
17 * all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
28 #include <linux/config.h>
29 #include <linux/module.h>
30 #include <linux/version.h>
31 #include <linux/kernel.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/string.h>
35 #include <linux/errno.h>
36 #include <linux/netdevice.h>
37 #include <linux/inetdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/skbuff.h>
40 #include <linux/init.h>
41 #include <linux/bitops.h>
42 #include <linux/proc_fs.h>
43 #include <linux/ethtool.h>
44 #include <net/sock.h>
45 #include <net/pkt_sched.h>
46 #include <net/arp.h>
47 #include <net/route.h>
48 #include <asm/io.h>
49 #include <asm/uaccess.h>
50 #include <asm-xen/evtchn.h>
51 #include <asm-xen/xenbus.h>
52 #include <asm-xen/xen-public/io/netif.h>
53 #include <asm-xen/xen-public/memory.h>
54 #include <asm-xen/balloon.h>
55 #include <asm/page.h>
56 #include <asm/uaccess.h>
57 #include <asm-xen/xen-public/grant_table.h>
58 #include <asm-xen/gnttab.h>
60 #define GRANT_INVALID_REF (0xFFFF)
62 #ifndef __GFP_NOWARN
63 #define __GFP_NOWARN 0
64 #endif
65 #define alloc_xen_skb(_l) __dev_alloc_skb((_l), GFP_ATOMIC|__GFP_NOWARN)
67 #define init_skb_shinfo(_skb) \
68 do { \
69 atomic_set(&(skb_shinfo(_skb)->dataref), 1); \
70 skb_shinfo(_skb)->nr_frags = 0; \
71 skb_shinfo(_skb)->frag_list = NULL; \
72 } while (0)
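/*
 * Note: skb_shared_info lives at the end of an skb's data buffer. Receive
 * pages come back from the backend with arbitrary contents, so netif_poll()
 * below re-runs init_skb_shinfo() on every received skb before handing it
 * up the stack ("Set the shinfo area, which is hidden behind the data").
 */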
74 /* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
75 #define RX_HEADROOM 200
77 /*
78 * If the backend driver is pipelining transmit requests then we can be very
79 * aggressive in avoiding new-packet notifications -- only need to send a
80 * notification if there are no outstanding unreceived responses.
81 * If the backend may be buffering our transmit buffers for any reason then we
82 * are rather more conservative.
83 */
84 #ifdef CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
85 #define TX_TEST_IDX resp_prod /* aggressive: any outstanding responses? */
86 #else
87 #define TX_TEST_IDX req_cons /* conservative: not seen all our requests? */
88 #endif
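/*
 * TX_TEST_IDX is consulted in network_start_xmit(): after queueing request
 * 'i' we only call notify_via_evtchn() when np->tx->TX_TEST_IDX == i, i.e.
 * when the backend appears to have caught up with everything queued ahead
 * of our new request and may therefore need a kick.
 */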
91 #define NETIF_STATE_DISCONNECTED 0
92 #define NETIF_STATE_CONNECTED 1
94 static unsigned int netif_state = NETIF_STATE_DISCONNECTED;
96 static void network_tx_buf_gc(struct net_device *dev);
97 static void network_alloc_rx_buffers(struct net_device *dev);
99 static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
100 static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
101 static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
103 #ifdef CONFIG_PROC_FS
104 static int xennet_proc_init(void);
105 static int xennet_proc_addif(struct net_device *dev);
106 static void xennet_proc_delif(struct net_device *dev);
107 #else
108 #define xennet_proc_init() (0)
109 #define xennet_proc_addif(d) (0)
110 #define xennet_proc_delif(d) ((void)0)
111 #endif
113 #define netfront_info net_private
114 struct net_private
115 {
116 struct list_head list;
117 struct net_device *netdev;
119 struct net_device_stats stats;
120 NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
121 unsigned int tx_full;
123 netif_tx_interface_t *tx;
124 netif_rx_interface_t *rx;
126 spinlock_t tx_lock;
127 spinlock_t rx_lock;
129 unsigned int handle;
130 unsigned int evtchn;
132 /* What is the status of our connection to the remote backend? */
133 #define BEST_CLOSED 0
134 #define BEST_DISCONNECTED 1
135 #define BEST_CONNECTED 2
136 unsigned int backend_state;
138 /* Is this interface open or closed (down or up)? */
139 #define UST_CLOSED 0
140 #define UST_OPEN 1
141 unsigned int user_state;
143 /* Receive-ring batched refills. */
144 #define RX_MIN_TARGET 8
145 #define RX_MAX_TARGET NETIF_RX_RING_SIZE
146 int rx_min_target, rx_max_target, rx_target;
147 struct sk_buff_head rx_batch;
149 /*
150 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
151 * array is an index into a chain of free entries.
152 */
153 struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
154 struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
156 grant_ref_t gref_tx_head;
157 grant_ref_t grant_tx_ref[NETIF_TX_RING_SIZE + 1];
158 grant_ref_t gref_rx_head;
159 grant_ref_t grant_rx_ref[NETIF_RX_RING_SIZE + 1];
161 struct xenbus_device *xbdev;
162 char *backend;
163 int backend_id;
164 struct xenbus_watch watch;
165 int tx_ring_ref;
166 int rx_ring_ref;
167 u8 mac[ETH_ALEN];
168 };
170 /* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
171 #define ADD_ID_TO_FREELIST(_list, _id) \
172 (_list)[(_id)] = (_list)[0]; \
173 (_list)[0] = (void *)(unsigned long)(_id);
174 #define GET_ID_FROM_FREELIST(_list) \
175 ({ unsigned long _id = (unsigned long)(_list)[0]; \
176 (_list)[0] = (_list)[_id]; \
177 (unsigned short)_id; })
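/*
 * Example use of the freelist macros, as in network_start_xmit() and
 * network_tx_buf_gc():
 *
 *     id = GET_ID_FROM_FREELIST(np->tx_skbs);   (claim a free ring id)
 *     np->tx_skbs[id] = skb;
 *     ...
 *     ADD_ID_TO_FREELIST(np->tx_skbs, id);      (recycle the id once the
 *                                                response is processed)
 *
 * Entry 0 of each array is the head of the free chain: free slots hold
 * small integer indices, busy slots hold skb pointers. network_connect()
 * relies on pointers being >= __PAGE_OFFSET to distinguish the two.
 */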
179 #ifdef DEBUG
180 static char *be_state_name[] = {
181 [BEST_CLOSED] = "closed",
182 [BEST_DISCONNECTED] = "disconnected",
183 [BEST_CONNECTED] = "connected",
184 };
185 #endif
187 #ifdef DEBUG
188 #define DPRINTK(fmt, args...) \
189 printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
190 #else
191 #define DPRINTK(fmt, args...) ((void)0)
192 #endif
193 #define IPRINTK(fmt, args...) \
194 printk(KERN_INFO "xen_net: " fmt, ##args)
195 #define WPRINTK(fmt, args...) \
196 printk(KERN_WARNING "xen_net: " fmt, ##args)
198 /** Send a packet on a net device to encourage switches to learn the
199 * MAC. We send a gratuitous (fake) ARP reply.
200 *
201 * @param dev device
202 * @return 0 on success, error code otherwise
203 */
204 static int send_fake_arp(struct net_device *dev)
205 {
206 struct sk_buff *skb;
207 u32 src_ip, dst_ip;
209 dst_ip = INADDR_BROADCAST;
210 src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
212 /* No IP? Then nothing to do. */
213 if (src_ip == 0)
214 return 0;
216 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
217 dst_ip, dev, src_ip,
218 /*dst_hw*/ NULL, /*src_hw*/ NULL,
219 /*target_hw*/ dev->dev_addr);
220 if (skb == NULL)
221 return -ENOMEM;
223 return dev_queue_xmit(skb);
224 }
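/*
 * The packet built above is a gratuitous ARP: an ARPOP_REPLY broadcast
 * carrying our own hardware address as the target, so switches and ARP
 * caches relearn where this MAC lives. connect_device() sends one each
 * time the interface (re)connects, and the inetdev notifier below sends
 * one whenever an IP address is brought up on the VIF.
 */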
226 static int network_open(struct net_device *dev)
227 {
228 struct net_private *np = netdev_priv(dev);
230 memset(&np->stats, 0, sizeof(np->stats));
232 np->user_state = UST_OPEN;
234 network_alloc_rx_buffers(dev);
235 np->rx->event = np->rx_resp_cons + 1;
237 netif_start_queue(dev);
239 return 0;
240 }
242 static void network_tx_buf_gc(struct net_device *dev)
243 {
244 NETIF_RING_IDX i, prod;
245 unsigned short id;
246 struct net_private *np = netdev_priv(dev);
247 struct sk_buff *skb;
249 if (np->backend_state != BEST_CONNECTED)
250 return;
252 do {
253 prod = np->tx->resp_prod;
254 rmb(); /* Ensure we see responses up to 'prod'. */
256 for (i = np->tx_resp_cons; i != prod; i++) {
257 id = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
258 skb = np->tx_skbs[id];
259 if (unlikely(gnttab_query_foreign_access(
260 np->grant_tx_ref[id]) != 0)) {
261 printk(KERN_ALERT "network_tx_buf_gc: warning "
262 "-- grant still in use by backend "
263 "domain.\n");
264 goto out;
265 }
266 gnttab_end_foreign_access_ref(
267 np->grant_tx_ref[id], GNTMAP_readonly);
268 gnttab_release_grant_reference(
269 &np->gref_tx_head, np->grant_tx_ref[id]);
270 np->grant_tx_ref[id] = GRANT_INVALID_REF;
271 ADD_ID_TO_FREELIST(np->tx_skbs, id);
272 dev_kfree_skb_irq(skb);
273 }
275 np->tx_resp_cons = prod;
277 /*
278 * Set a new event, then check for race with update of tx_cons.
279 * Note that it is essential to schedule a callback, no matter
280 * how few buffers are pending. Even if there is space in the
281 * transmit ring, higher layers may be blocked because too much
282 * data is outstanding: in such cases notification from Xen is
283 * likely to be the only kick that we'll get.
284 */
285 np->tx->event = prod + ((np->tx->req_prod - prod) >> 1) + 1;
286 mb();
287 } while (prod != np->tx->resp_prod);
289 out:
290 if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
291 np->tx_full = 0;
292 if (np->user_state == UST_OPEN)
293 netif_wake_queue(dev);
294 }
295 }
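/*
 * The threshold written to np->tx->event above, prod + ((req_prod - prod)
 * >> 1) + 1, asks for the next completion interrupt roughly half way
 * through the requests still outstanding: a compromise between reclaiming
 * transmit slots promptly and taking an interrupt per packet.
 */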
298 static void network_alloc_rx_buffers(struct net_device *dev)
299 {
300 unsigned short id;
301 struct net_private *np = netdev_priv(dev);
302 struct sk_buff *skb;
303 int i, batch_target;
304 NETIF_RING_IDX req_prod = np->rx->req_prod;
305 struct xen_memory_reservation reservation;
306 grant_ref_t ref;
308 if (unlikely(np->backend_state != BEST_CONNECTED))
309 return;
311 /*
312 * Allocate skbuffs greedily, even though we batch updates to the
313 * receive ring. This creates a less bursty demand on the memory
314 * allocator, so should reduce the chance of failed allocation requests
315 * both for ourself and for other kernel subsystems.
316 */
317 batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
318 for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
319 skb = alloc_xen_skb(dev->mtu + RX_HEADROOM);
320 if (skb == NULL)
321 break;
322 __skb_queue_tail(&np->rx_batch, skb);
323 }
325 /* Is the batch large enough to be worthwhile? */
326 if (i < (np->rx_target/2))
327 return;
329 for (i = 0; ; i++) {
330 if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
331 break;
333 skb->dev = dev;
335 id = GET_ID_FROM_FREELIST(np->rx_skbs);
337 np->rx_skbs[id] = skb;
339 np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
340 ref = gnttab_claim_grant_reference(&np->gref_rx_head);
341 BUG_ON((signed short)ref < 0);
342 np->grant_rx_ref[id] = ref;
343 gnttab_grant_foreign_transfer_ref(ref, np->backend_id);
344 np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref;
345 rx_pfn_array[i] = virt_to_mfn(skb->head);
347 /* Remove this page from map before passing back to Xen. */
348 phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT]
349 = INVALID_P2M_ENTRY;
351 MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head,
352 __pte(0), 0);
353 }
355 /* After all PTEs have been zapped we blow away stale TLB entries. */
356 rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
358 /* Give away a batch of pages. */
359 rx_mcl[i].op = __HYPERVISOR_memory_op;
360 rx_mcl[i].args[0] = XENMEM_decrease_reservation;
361 rx_mcl[i].args[1] = (unsigned long)&reservation;
363 reservation.extent_start = rx_pfn_array;
364 reservation.nr_extents = i;
365 reservation.extent_order = 0;
366 reservation.address_bits = 0;
367 reservation.domid = DOMID_SELF;
369 /* Tell the balloon driver what is going on. */
370 balloon_update_driver_allowance(i);
372 /* Zap PTEs and give away pages in one big multicall. */
373 (void)HYPERVISOR_multicall(rx_mcl, i+1);
375 /* Check return status of HYPERVISOR_memory_op(). */
376 if (unlikely(rx_mcl[i].result != i))
377 panic("Unable to reduce memory reservation\n");
379 /* Above is a suitable barrier to ensure backend will see requests. */
380 np->rx->req_prod = req_prod + i;
382 /* Adjust our fill target if we risked running out of buffers. */
383 if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
384 ((np->rx_target *= 2) > np->rx_max_target))
385 np->rx_target = np->rx_max_target;
386 }
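/*
 * Summary of the receive-side page flip set up above: for each new skb we
 * queue a request carrying a grant-transfer reference, zap our own PTE and
 * machine-to-physical entry for the page, and then hand the page to Xen
 * via XENMEM_decrease_reservation (with the balloon driver's accounting
 * adjusted). The backend later transfers a page containing packet data
 * back to us, which netif_poll() remaps.
 */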
389 static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
390 {
391 unsigned short id;
392 struct net_private *np = netdev_priv(dev);
393 netif_tx_request_t *tx;
394 NETIF_RING_IDX i;
395 grant_ref_t ref;
396 unsigned long mfn;
398 if (unlikely(np->tx_full)) {
399 printk(KERN_ALERT "%s: full queue wasn't stopped!\n",
400 dev->name);
401 netif_stop_queue(dev);
402 goto drop;
403 }
405 if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
406 PAGE_SIZE)) {
407 struct sk_buff *nskb;
408 if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
409 goto drop;
410 skb_put(nskb, skb->len);
411 memcpy(nskb->data, skb->data, skb->len);
412 nskb->dev = skb->dev;
413 dev_kfree_skb(skb);
414 skb = nskb;
415 }
417 spin_lock_irq(&np->tx_lock);
419 if (np->backend_state != BEST_CONNECTED) {
420 spin_unlock_irq(&np->tx_lock);
421 goto drop;
422 }
424 i = np->tx->req_prod;
426 id = GET_ID_FROM_FREELIST(np->tx_skbs);
427 np->tx_skbs[id] = skb;
429 tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
431 tx->id = id;
432 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
433 BUG_ON((signed short)ref < 0);
434 mfn = virt_to_mfn(skb->data);
435 gnttab_grant_foreign_access_ref(
436 ref, np->backend_id, mfn, GNTMAP_readonly);
437 tx->gref = np->grant_tx_ref[id] = ref;
438 tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
439 tx->size = skb->len;
440 tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
442 wmb(); /* Ensure that backend will see the request. */
443 np->tx->req_prod = i + 1;
445 network_tx_buf_gc(dev);
447 if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
448 np->tx_full = 1;
449 netif_stop_queue(dev);
450 }
452 spin_unlock_irq(&np->tx_lock);
454 np->stats.tx_bytes += skb->len;
455 np->stats.tx_packets++;
457 /* Only notify Xen if we really have to. */
458 mb();
459 if (np->tx->TX_TEST_IDX == i)
460 notify_via_evtchn(np->evtchn);
462 return 0;
464 drop:
465 np->stats.tx_dropped++;
466 dev_kfree_skb(skb);
467 return 0;
468 }
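/*
 * Transmit path notes: a request describes a single page, so a packet whose
 * data would cross a page boundary is first copied into a fresh skb (the
 * alloc_xen_skb() copy above). The frame itself is not copied to the
 * backend; it is granted read-only by machine frame number, and the grant
 * is only torn down in network_tx_buf_gc() once the response for that id
 * has been seen.
 */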
470 static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
471 {
472 struct net_device *dev = dev_id;
473 struct net_private *np = netdev_priv(dev);
474 unsigned long flags;
476 spin_lock_irqsave(&np->tx_lock, flags);
477 network_tx_buf_gc(dev);
478 spin_unlock_irqrestore(&np->tx_lock, flags);
480 if ((np->rx_resp_cons != np->rx->resp_prod) &&
481 (np->user_state == UST_OPEN))
482 netif_rx_schedule(dev);
484 return IRQ_HANDLED;
485 }
488 static int netif_poll(struct net_device *dev, int *pbudget)
489 {
490 struct net_private *np = netdev_priv(dev);
491 struct sk_buff *skb, *nskb;
492 netif_rx_response_t *rx;
493 NETIF_RING_IDX i, rp;
494 mmu_update_t *mmu = rx_mmu;
495 multicall_entry_t *mcl = rx_mcl;
496 int work_done, budget, more_to_do = 1;
497 struct sk_buff_head rxq;
498 unsigned long flags;
499 unsigned long mfn;
500 grant_ref_t ref;
502 spin_lock(&np->rx_lock);
504 if (np->backend_state != BEST_CONNECTED) {
505 spin_unlock(&np->rx_lock);
506 return 0;
507 }
509 skb_queue_head_init(&rxq);
511 if ((budget = *pbudget) > dev->quota)
512 budget = dev->quota;
513 rp = np->rx->resp_prod;
514 rmb(); /* Ensure we see queued responses up to 'rp'. */
516 for (i = np->rx_resp_cons, work_done = 0;
517 (i != rp) && (work_done < budget);
518 i++, work_done++) {
519 rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
520 /*
521 * An error here is very odd. Usually indicates a backend bug,
522 * low-mem condition, or we didn't have reservation headroom.
523 */
524 if (unlikely(rx->status <= 0)) {
525 if (net_ratelimit())
526 printk(KERN_WARNING "Bad rx buffer "
527 "(memory squeeze?).\n");
528 np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].
529 req.id = rx->id;
530 wmb();
531 np->rx->req_prod++;
532 work_done--;
533 continue;
534 }
536 ref = np->grant_rx_ref[rx->id];
538 if (ref == GRANT_INVALID_REF) {
539 printk(KERN_WARNING "Bad rx grant reference %d "
540 "from dom %d.\n",
541 ref, np->backend_id);
542 np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].
543 req.id = rx->id;
544 wmb();
545 np->rx->req_prod++;
546 work_done--;
547 continue;
548 }
550 np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
551 mfn = gnttab_end_foreign_transfer_ref(ref);
552 gnttab_release_grant_reference(&np->gref_rx_head, ref);
554 skb = np->rx_skbs[rx->id];
555 ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
557 /* NB. We handle skb overflow later. */
558 skb->data = skb->head + rx->offset;
559 skb->len = rx->status;
560 skb->tail = skb->data + skb->len;
562 if ( rx->csum_valid )
563 skb->ip_summed = CHECKSUM_UNNECESSARY;
565 np->stats.rx_packets++;
566 np->stats.rx_bytes += rx->status;
568 /* Remap the page. */
569 mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
570 mmu->val = __pa(skb->head) >> PAGE_SHIFT;
571 mmu++;
572 MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
573 pfn_pte_ma(mfn, PAGE_KERNEL), 0);
574 mcl++;
576 phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = mfn;
578 __skb_queue_tail(&rxq, skb);
579 }
581 /* Some pages are no longer absent... */
582 balloon_update_driver_allowance(-work_done);
584 /* Do all the remapping work, and M2P updates, in one big hypercall. */
585 if (likely((mcl - rx_mcl) != 0)) {
586 mcl->op = __HYPERVISOR_mmu_update;
587 mcl->args[0] = (unsigned long)rx_mmu;
588 mcl->args[1] = mmu - rx_mmu;
589 mcl->args[2] = 0;
590 mcl->args[3] = DOMID_SELF;
591 mcl++;
592 (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
593 }
595 while ((skb = __skb_dequeue(&rxq)) != NULL) {
596 /*
597 * Enough room in skbuff for the data we were passed? Also,
598 * Linux expects at least 16 bytes headroom in each rx buffer.
599 */
600 if (unlikely(skb->tail > skb->end) ||
601 unlikely((skb->data - skb->head) < 16)) {
602 nskb = NULL;
604 /* Only copy the packet if it fits in the MTU. */
605 if (skb->len <= (dev->mtu + ETH_HLEN)) {
606 if ((skb->tail > skb->end) && net_ratelimit())
607 printk(KERN_INFO "Received packet "
608 "needs %zd bytes more "
609 "headroom.\n",
610 skb->tail - skb->end);
612 nskb = alloc_xen_skb(skb->len + 2);
613 if (nskb != NULL) {
614 skb_reserve(nskb, 2);
615 skb_put(nskb, skb->len);
616 memcpy(nskb->data,
617 skb->data,
618 skb->len);
619 nskb->dev = skb->dev;
620 }
621 }
622 else if (net_ratelimit())
623 printk(KERN_INFO "Received packet too big for "
624 "MTU (%d > %d)\n",
625 skb->len - ETH_HLEN, dev->mtu);
627 /* Reinitialise and then destroy the old skbuff. */
628 skb->len = 0;
629 skb->tail = skb->data;
630 init_skb_shinfo(skb);
631 dev_kfree_skb(skb);
633 /* Switch old for new, if we copied the buffer. */
634 if ((skb = nskb) == NULL)
635 continue;
636 }
638 /* Set the shinfo area, which is hidden behind the data. */
639 init_skb_shinfo(skb);
640 /* Ethernet work: Delayed to here as it peeks the header. */
641 skb->protocol = eth_type_trans(skb, dev);
643 /* Pass it up. */
644 netif_receive_skb(skb);
645 dev->last_rx = jiffies;
646 }
648 np->rx_resp_cons = i;
650 /* If we get a callback with very few responses, reduce fill target. */
651 /* NB. Note exponential increase, linear decrease. */
652 if (((np->rx->req_prod - np->rx->resp_prod) >
653 ((3*np->rx_target) / 4)) &&
654 (--np->rx_target < np->rx_min_target))
655 np->rx_target = np->rx_min_target;
657 network_alloc_rx_buffers(dev);
659 *pbudget -= work_done;
660 dev->quota -= work_done;
662 if (work_done < budget) {
663 local_irq_save(flags);
665 np->rx->event = i + 1;
667 /* Deal with hypervisor racing our resetting of rx_event. */
668 mb();
669 if (np->rx->resp_prod == i) {
670 __netif_rx_complete(dev);
671 more_to_do = 0;
672 }
674 local_irq_restore(flags);
675 }
677 spin_unlock(&np->rx_lock);
679 return more_to_do;
680 }
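/*
 * netif_poll() is the NAPI receive handler: it consumes up to 'budget'
 * responses, accepts the transferred pages (one machine-to-physical update
 * and one PTE rewrite per packet, batched into a single multicall), fixes
 * up skb->data/len/tail from the response offset and status, and only
 * re-enables the rx event after re-checking resp_prod, to close the race
 * with the backend.
 */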
683 static int network_close(struct net_device *dev)
684 {
685 struct net_private *np = netdev_priv(dev);
686 np->user_state = UST_CLOSED;
687 netif_stop_queue(np->netdev);
688 return 0;
689 }
692 static struct net_device_stats *network_get_stats(struct net_device *dev)
693 {
694 struct net_private *np = netdev_priv(dev);
695 return &np->stats;
696 }
698 static void network_connect(struct net_device *dev)
699 {
700 struct net_private *np;
701 int i, requeue_idx;
702 netif_tx_request_t *tx;
703 struct sk_buff *skb;
705 np = netdev_priv(dev);
706 spin_lock_irq(&np->tx_lock);
707 spin_lock(&np->rx_lock);
709 /* Recovery procedure: */
711 /* Step 1: Reinitialise variables. */
712 np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
713 np->rx->event = np->tx->event = 1;
715 /*
716 * Step 2: Rebuild the RX and TX ring contents.
717 * NB. We could just free the queued TX packets now but we hope
718 * that sending them out might do some good. We have to rebuild
719 * the RX ring because some of our pages are currently flipped out
720 * so we can't just free the RX skbs.
721 * NB2. Freelist index entries are always going to be less than
722 * __PAGE_OFFSET, whereas pointers to skbs will always be equal or
723 * greater than __PAGE_OFFSET: we use this property to distinguish
724 * them.
725 */
727 /*
728 * Rebuild the TX buffer freelist and the TX ring itself.
729 * NB. This reorders packets. We could keep more private state
730 * to avoid this but maybe it doesn't matter so much given the
731 * interface has been down.
732 */
733 for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
734 if ((unsigned long)np->tx_skbs[i] < __PAGE_OFFSET)
735 continue;
737 skb = np->tx_skbs[i];
739 tx = &np->tx->ring[requeue_idx++].req;
741 tx->id = i;
742 gnttab_grant_foreign_access_ref(
743 np->grant_tx_ref[i], np->backend_id,
744 virt_to_mfn(np->tx_skbs[i]->data),
745 GNTMAP_readonly);
746 tx->gref = np->grant_tx_ref[i];
747 tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
748 tx->size = skb->len;
749 tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
751 np->stats.tx_bytes += skb->len;
752 np->stats.tx_packets++;
753 }
754 wmb();
755 np->tx->req_prod = requeue_idx;
757 /* Rebuild the RX buffer freelist and the RX ring itself. */
758 for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) {
759 if ((unsigned long)np->rx_skbs[i] < __PAGE_OFFSET)
760 continue;
761 gnttab_grant_foreign_transfer_ref(
762 np->grant_rx_ref[i], np->backend_id);
763 np->rx->ring[requeue_idx].req.gref =
764 np->grant_rx_ref[i];
765 np->rx->ring[requeue_idx].req.id = i;
766 requeue_idx++;
767 }
768 wmb();
769 np->rx->req_prod = requeue_idx;
771 /*
772 * Step 3: All public and private state should now be sane. Get
773 * ready to start sending and receiving packets and give the driver
774 * domain a kick because we've probably just requeued some
775 * packets.
776 */
777 np->backend_state = BEST_CONNECTED;
778 wmb();
779 notify_via_evtchn(np->evtchn);
780 network_tx_buf_gc(dev);
782 if (np->user_state == UST_OPEN)
783 netif_start_queue(dev);
785 spin_unlock(&np->rx_lock);
786 spin_unlock_irq(&np->tx_lock);
787 }
789 static void show_device(struct net_private *np)
790 {
791 #ifdef DEBUG
792 if (np) {
793 IPRINTK("<vif handle=%u %s(%s) evtchn=%u tx=%p rx=%p>\n",
794 np->handle,
795 be_state_name[np->backend_state],
796 np->user_state ? "open" : "closed",
797 np->evtchn,
798 np->tx,
799 np->rx);
800 } else {
801 IPRINTK("<vif NULL>\n");
802 }
803 #endif
804 }
806 /*
807 * Move the vif into connected state.
808 * Sets the mac and event channel from the message.
809 * Binds the irq to the event channel.
810 */
811 static void
812 connect_device(struct net_private *np, unsigned int evtchn)
813 {
814 struct net_device *dev = np->netdev;
815 memcpy(dev->dev_addr, np->mac, ETH_ALEN);
816 np->evtchn = evtchn;
817 network_connect(dev);
818 (void)bind_evtchn_to_irqhandler(
819 np->evtchn, netif_int, SA_SAMPLE_RANDOM, dev->name, dev);
820 (void)send_fake_arp(dev);
821 show_device(np);
822 }
824 static void netif_uninit(struct net_device *dev)
825 {
826 struct net_private *np = netdev_priv(dev);
827 gnttab_free_grant_references(np->gref_tx_head);
828 gnttab_free_grant_references(np->gref_rx_head);
829 }
831 static struct ethtool_ops network_ethtool_ops =
832 {
833 .get_tx_csum = ethtool_op_get_tx_csum,
834 .set_tx_csum = ethtool_op_set_tx_csum,
835 };
837 /** Create a network device.
838 * @param handle device handle
839 * @param val return parameter for created device
840 * @return 0 on success, error code otherwise
841 */
842 static int create_netdev(int handle, struct xenbus_device *dev,
843 struct net_device **val)
844 {
845 int i, err = 0;
846 struct net_device *netdev = NULL;
847 struct net_private *np = NULL;
849 if ((netdev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
850 printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
851 __FUNCTION__);
852 err = -ENOMEM;
853 goto exit;
854 }
856 np = netdev_priv(netdev);
857 np->backend_state = BEST_CLOSED;
858 np->user_state = UST_CLOSED;
859 np->handle = handle;
860 np->xbdev = dev;
862 spin_lock_init(&np->tx_lock);
863 spin_lock_init(&np->rx_lock);
865 skb_queue_head_init(&np->rx_batch);
866 np->rx_target = RX_MIN_TARGET;
867 np->rx_min_target = RX_MIN_TARGET;
868 np->rx_max_target = RX_MAX_TARGET;
870 /* Initialise {tx,rx}_skbs as a free chain containing every entry. */
871 for (i = 0; i <= NETIF_TX_RING_SIZE; i++) {
872 np->tx_skbs[i] = (void *)((unsigned long) i+1);
873 np->grant_tx_ref[i] = GRANT_INVALID_REF;
874 }
876 for (i = 0; i <= NETIF_RX_RING_SIZE; i++) {
877 np->rx_skbs[i] = (void *)((unsigned long) i+1);
878 np->grant_rx_ref[i] = GRANT_INVALID_REF;
879 }
881 /* A grant for every tx ring slot */
882 if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
883 &np->gref_tx_head) < 0) {
884 printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
885 goto exit;
886 }
887 /* A grant for every rx ring slot */
888 if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE,
889 &np->gref_rx_head) < 0) {
890 printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
891 gnttab_free_grant_references(np->gref_tx_head);
892 goto exit;
893 }
895 netdev->open = network_open;
896 netdev->hard_start_xmit = network_start_xmit;
897 netdev->stop = network_close;
898 netdev->get_stats = network_get_stats;
899 netdev->poll = netif_poll;
900 netdev->uninit = netif_uninit;
901 netdev->weight = 64;
902 netdev->features = NETIF_F_IP_CSUM;
904 SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
906 if ((err = register_netdev(netdev)) != 0) {
907 printk(KERN_WARNING "%s> register_netdev err=%d\n",
908 __FUNCTION__, err);
909 goto exit_free_grefs;
910 }
912 if ((err = xennet_proc_addif(netdev)) != 0) {
913 unregister_netdev(netdev);
914 goto exit_free_grefs;
915 }
917 np->netdev = netdev;
919 exit:
920 if ((err != 0) && (netdev != NULL))
921 kfree(netdev);
922 else if (val != NULL)
923 *val = netdev;
924 return err;
926 exit_free_grefs:
927 gnttab_free_grant_references(np->gref_tx_head);
928 gnttab_free_grant_references(np->gref_rx_head);
929 goto exit;
930 }
932 static int destroy_netdev(struct net_device *netdev)
933 {
934 #ifdef CONFIG_PROC_FS
935 xennet_proc_delif(netdev);
936 #endif
937 unregister_netdev(netdev);
938 return 0;
939 }
941 /*
942 * We use this notifier to send out a fake ARP reply to reset switches and
943 * router ARP caches when an IP interface is brought up on a VIF.
944 */
945 static int
946 inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
947 {
948 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
949 struct net_device *dev = ifa->ifa_dev->dev;
951 /* UP event and is it one of our devices? */
952 if (event == NETDEV_UP && dev->open == network_open)
953 (void)send_fake_arp(dev);
955 return NOTIFY_DONE;
956 }
958 static struct notifier_block notifier_inetdev = {
959 .notifier_call = inetdev_notify,
960 .next = NULL,
961 .priority = 0
962 };
964 static struct xenbus_device_id netfront_ids[] = {
965 { "vif" },
966 { "" }
967 };
969 static void watch_for_status(struct xenbus_watch *watch, const char *node)
970 {
971 }
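/*
 * watch_for_status() is registered by talk_to_backend() as the callback for
 * the watch placed on the backend directory; in this revision it is an
 * empty stub, so backend state changes are not yet acted upon here.
 */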
973 static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
974 {
975 evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound };
976 int err;
978 info->tx_ring_ref = GRANT_INVALID_REF;
979 info->rx_ring_ref = GRANT_INVALID_REF;
981 info->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
982 if (info->tx == 0) {
983 err = -ENOMEM;
984 xenbus_dev_error(dev, err, "allocating tx ring page");
985 goto out;
986 }
987 info->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
988 if (info->rx == 0) {
989 err = -ENOMEM;
990 xenbus_dev_error(dev, err, "allocating rx ring page");
991 goto out;
992 }
993 memset(info->tx, 0, PAGE_SIZE);
994 memset(info->rx, 0, PAGE_SIZE);
995 info->backend_state = BEST_DISCONNECTED;
997 err = gnttab_grant_foreign_access(info->backend_id,
998 virt_to_mfn(info->tx), 0);
999 if (err < 0) {
1000 xenbus_dev_error(dev, err, "granting access to tx ring page");
1001 goto out;
1002 }
1003 info->tx_ring_ref = err;
1005 err = gnttab_grant_foreign_access(info->backend_id,
1006 virt_to_mfn(info->rx), 0);
1007 if (err < 0) {
1008 xenbus_dev_error(dev, err, "granting access to rx ring page");
1009 goto out;
1010 }
1011 info->rx_ring_ref = err;
1013 op.u.alloc_unbound.dom = info->backend_id;
1014 err = HYPERVISOR_event_channel_op(&op);
1015 if (err) {
1016 xenbus_dev_error(dev, err, "allocating event channel");
1017 goto out;
1018 }
1019 connect_device(info, op.u.alloc_unbound.port);
1020 return 0;
1022 out:
1023 if (info->tx)
1024 free_page((unsigned long)info->tx);
1025 info->tx = 0;
1026 if (info->rx)
1027 free_page((unsigned long)info->rx);
1028 info->rx = 0;
1030 if (info->tx_ring_ref != GRANT_INVALID_REF)
1031 gnttab_end_foreign_access(info->tx_ring_ref, 0);
1032 info->tx_ring_ref = GRANT_INVALID_REF;
1034 if (info->rx_ring_ref != GRANT_INVALID_REF)
1035 gnttab_end_foreign_access(info->rx_ring_ref, 0);
1036 info->rx_ring_ref = GRANT_INVALID_REF;
1038 return err;
1039 }
1041 static void netif_free(struct netfront_info *info)
1042 {
1043 if (info->tx)
1044 free_page((unsigned long)info->tx);
1045 info->tx = 0;
1046 if (info->rx)
1047 free_page((unsigned long)info->rx);
1048 info->rx = 0;
1050 if (info->tx_ring_ref != GRANT_INVALID_REF)
1051 gnttab_end_foreign_access(info->tx_ring_ref, 0);
1052 info->tx_ring_ref = GRANT_INVALID_REF;
1054 if (info->rx_ring_ref != GRANT_INVALID_REF)
1055 gnttab_end_foreign_access(info->rx_ring_ref, 0);
1056 info->rx_ring_ref = GRANT_INVALID_REF;
1058 unbind_evtchn_from_irqhandler(info->evtchn, info->netdev);
1059 info->evtchn = 0;
1060 }
1062 /* Stop network device and free tx/rx queues and irq. */
1063 static void shutdown_device(struct net_private *np)
1064 {
1065 /* Stop old i/f to prevent errors whilst we rebuild the state. */
1066 spin_lock_irq(&np->tx_lock);
1067 spin_lock(&np->rx_lock);
1068 netif_stop_queue(np->netdev);
1069 /* np->backend_state = BEST_DISCONNECTED; */
1070 spin_unlock(&np->rx_lock);
1071 spin_unlock_irq(&np->tx_lock);
1073 /* Free resources. */
1074 netif_free(np);
1075 }
1077 /* Common code used when first setting up, and when resuming. */
1078 static int talk_to_backend(struct xenbus_device *dev,
1079 struct netfront_info *info)
1080 {
1081 char *backend, *mac, *e, *s;
1082 const char *message;
1083 int err, i;
1085 backend = NULL;
1086 err = xenbus_gather(dev->nodename,
1087 "backend-id", "%i", &info->backend_id,
1088 "backend", NULL, &backend,
1089 NULL);
1090 if (XENBUS_EXIST_ERR(err))
1091 goto out;
1092 if (backend && strlen(backend) == 0) {
1093 err = -ENOENT;
1094 goto out;
1095 }
1096 if (err < 0) {
1097 xenbus_dev_error(dev, err, "reading %s/backend or backend-id",
1098 dev->nodename);
1099 goto out;
1100 }
1102 mac = xenbus_read(dev->nodename, "mac", NULL);
1103 if (IS_ERR(mac)) {
1104 err = PTR_ERR(mac);
1105 xenbus_dev_error(dev, err, "reading %s/mac",
1106 dev->nodename);
1107 goto out;
1108 }
1109 s = mac;
1110 for (i = 0; i < ETH_ALEN; i++) {
1111 info->mac[i] = simple_strtoul(s, &e, 16);
1112 if (s == e || (e[0] != ':' && e[0] != 0)) {
1113 kfree(mac);
1114 err = -ENOENT;
1115 xenbus_dev_error(dev, err, "parsing %s/mac",
1116 dev->nodename);
1117 goto out;
1118 }
1119 s = &e[1];
1120 }
1121 kfree(mac);
1123 /* Create shared ring, alloc event channel. */
1124 err = setup_device(dev, info);
1125 if (err) {
1126 xenbus_dev_error(dev, err, "setting up ring");
1127 goto out;
1128 }
1130 again:
1131 err = xenbus_transaction_start();
1132 if (err) {
1133 xenbus_dev_error(dev, err, "starting transaction");
1134 goto destroy_ring;
1135 }
1137 err = xenbus_printf(dev->nodename, "tx-ring-ref","%u",
1138 info->tx_ring_ref);
1139 if (err) {
1140 message = "writing tx ring-ref";
1141 goto abort_transaction;
1142 }
1143 err = xenbus_printf(dev->nodename, "rx-ring-ref","%u",
1144 info->rx_ring_ref);
1145 if (err) {
1146 message = "writing rx ring-ref";
1147 goto abort_transaction;
1148 }
1149 err = xenbus_printf(dev->nodename,
1150 "event-channel", "%u", info->evtchn);
1151 if (err) {
1152 message = "writing event-channel";
1153 goto abort_transaction;
1154 }
1156 err = xenbus_transaction_end(0);
1157 if (err) {
1158 if (err == -EAGAIN)
1159 goto again;
1160 xenbus_dev_error(dev, err, "completing transaction");
1161 goto destroy_ring;
1162 }
1164 info->watch.node = backend;
1165 info->watch.callback = watch_for_status;
1166 err = register_xenbus_watch(&info->watch);
1167 if (err) {
1168 message = "registering watch on backend";
1169 goto destroy_ring;
1170 }
1172 info->backend = backend;
1174 netif_state = NETIF_STATE_CONNECTED;
1176 return 0;
1178 abort_transaction:
1179 xenbus_transaction_end(1);
1180 /* Have to do this *outside* transaction. */
1181 xenbus_dev_error(dev, err, "%s", message);
1182 destroy_ring:
1183 shutdown_device(info);
1184 out:
1185 if (backend)
1186 kfree(backend);
1187 return err;
1188 }
1190 /*
1191 * Setup supplies the backend dir, virtual device.
1192 * We place an event channel and shared frame entries.
1193 * We watch backend to wait if it's ok.
1194 */
1195 static int netfront_probe(struct xenbus_device *dev,
1196 const struct xenbus_device_id *id)
1197 {
1198 int err;
1199 struct net_device *netdev;
1200 struct netfront_info *info;
1201 unsigned int handle;
1203 err = xenbus_scanf(dev->nodename, "handle", "%u", &handle);
1204 if (XENBUS_EXIST_ERR(err))
1205 return err;
1206 if (err < 0) {
1207 xenbus_dev_error(dev, err, "reading handle");
1208 return err;
1209 }
1211 err = create_netdev(handle, dev, &netdev);
1212 if (err) {
1213 xenbus_dev_error(dev, err, "creating netdev");
1214 return err;
1215 }
1217 info = netdev_priv(netdev);
1218 dev->data = info;
1220 err = talk_to_backend(dev, info);
1221 if (err) {
1222 destroy_netdev(netdev);
1223 kfree(netdev);
1224 dev->data = NULL;
1225 return err;
1226 }
1228 return 0;
1229 }
1231 static int netfront_remove(struct xenbus_device *dev)
1232 {
1233 struct netfront_info *info = dev->data;
1235 if (info->backend)
1236 unregister_xenbus_watch(&info->watch);
1238 netif_free(info);
1240 kfree(info->backend);
1241 kfree(info);
1243 return 0;
1244 }
1246 static int netfront_suspend(struct xenbus_device *dev)
1247 {
1248 struct netfront_info *info = dev->data;
1250 unregister_xenbus_watch(&info->watch);
1251 kfree(info->backend);
1252 info->backend = NULL;
1254 netif_free(info);
1256 return 0;
1257 }
1259 static int netfront_resume(struct xenbus_device *dev)
1260 {
1261 struct net_private *np = dev->data;
1262 int err;
1264 err = talk_to_backend(dev, np);
1266 return err;
1267 }
1269 static struct xenbus_driver netfront = {
1270 .name = "vif",
1271 .owner = THIS_MODULE,
1272 .ids = netfront_ids,
1273 .probe = netfront_probe,
1274 .remove = netfront_remove,
1275 .resume = netfront_resume,
1276 .suspend = netfront_suspend,
1277 };
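/*
 * Driver lifecycle, as wired up above: xenbus calls netfront_probe() when a
 * "vif" node appears; netfront_suspend() tears down the watch, rings and
 * event channel before a save, and netfront_resume() re-runs
 * talk_to_backend() to rebuild them afterwards (this changeset's "Fix netif
 * save/restore"). netfront_remove() performs the same teardown when the
 * device goes away.
 */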
1279 static void __init init_net_xenbus(void)
1280 {
1281 xenbus_register_device(&netfront);
1282 }
1284 static int wait_for_netif(void)
1285 {
1286 int err = 0;
1287 int i;
1289 /*
1290 * We should figure out how many and which devices we need to
1291 * proceed and only wait for those. For now, continue once the
1292 * first device is around.
1293 */
1294 for ( i=0; netif_state != NETIF_STATE_CONNECTED && (i < 10*HZ); i++ )
1295 {
1296 set_current_state(TASK_INTERRUPTIBLE);
1297 schedule_timeout(1);
1298 }
1300 if (netif_state != NETIF_STATE_CONNECTED) {
1301 WPRINTK("Timeout connecting to device!\n");
1302 err = -ENOSYS;
1303 }
1304 return err;
1305 }
1307 static int __init netif_init(void)
1308 {
1309 int err = 0;
1311 if (xen_start_info->flags & SIF_INITDOMAIN)
1312 return 0;
1314 if ((err = xennet_proc_init()) != 0)
1315 return err;
1317 IPRINTK("Initialising virtual ethernet driver.\n");
1319 (void)register_inetaddr_notifier(&notifier_inetdev);
1321 init_net_xenbus();
1323 wait_for_netif();
1325 return err;
1326 }
1328 static void netif_exit(void)
1329 {
1330 }
1332 #ifdef CONFIG_PROC_FS
1334 #define TARGET_MIN 0UL
1335 #define TARGET_MAX 1UL
1336 #define TARGET_CUR 2UL
1338 static int xennet_proc_read(
1339 char *page, char **start, off_t off, int count, int *eof, void *data)
1340 {
1341 struct net_device *dev =
1342 (struct net_device *)((unsigned long)data & ~3UL);
1343 struct net_private *np = netdev_priv(dev);
1344 int len = 0, which_target = (long)data & 3;
1346 switch (which_target)
1347 {
1348 case TARGET_MIN:
1349 len = sprintf(page, "%d\n", np->rx_min_target);
1350 break;
1351 case TARGET_MAX:
1352 len = sprintf(page, "%d\n", np->rx_max_target);
1353 break;
1354 case TARGET_CUR:
1355 len = sprintf(page, "%d\n", np->rx_target);
1356 break;
1357 }
1359 *eof = 1;
1360 return len;
1361 }
1363 static int xennet_proc_write(
1364 struct file *file, const char __user *buffer,
1365 unsigned long count, void *data)
1366 {
1367 struct net_device *dev =
1368 (struct net_device *)((unsigned long)data & ~3UL);
1369 struct net_private *np = netdev_priv(dev);
1370 int which_target = (long)data & 3;
1371 char string[64];
1372 long target;
1374 if (!capable(CAP_SYS_ADMIN))
1375 return -EPERM;
1377 if (count <= 1)
1378 return -EBADMSG; /* runt */
1379 if (count > sizeof(string))
1380 return -EFBIG; /* too long */
1382 if (copy_from_user(string, buffer, count))
1383 return -EFAULT;
1384 string[sizeof(string)-1] = '\0';
1386 target = simple_strtol(string, NULL, 10);
1387 if (target < RX_MIN_TARGET)
1388 target = RX_MIN_TARGET;
1389 if (target > RX_MAX_TARGET)
1390 target = RX_MAX_TARGET;
1392 spin_lock(&np->rx_lock);
1394 switch (which_target)
1395 {
1396 case TARGET_MIN:
1397 if (target > np->rx_max_target)
1398 np->rx_max_target = target;
1399 np->rx_min_target = target;
1400 if (target > np->rx_target)
1401 np->rx_target = target;
1402 break;
1403 case TARGET_MAX:
1404 if (target < np->rx_min_target)
1405 np->rx_min_target = target;
1406 np->rx_max_target = target;
1407 if (target < np->rx_target)
1408 np->rx_target = target;
1409 break;
1410 case TARGET_CUR:
1411 break;
1412 }
1414 network_alloc_rx_buffers(dev);
1416 spin_unlock(&np->rx_lock);
1418 return count;
1419 }
1421 static int xennet_proc_init(void)
1422 {
1423 if (proc_mkdir("xen/net", NULL) == NULL)
1424 return -ENOMEM;
1425 return 0;
1426 }
1428 static int xennet_proc_addif(struct net_device *dev)
1429 {
1430 struct proc_dir_entry *dir, *min, *max, *cur;
1431 char name[30];
1433 sprintf(name, "xen/net/%s", dev->name);
1435 dir = proc_mkdir(name, NULL);
1436 if (!dir)
1437 goto nomem;
1439 min = create_proc_entry("rxbuf_min", 0644, dir);
1440 max = create_proc_entry("rxbuf_max", 0644, dir);
1441 cur = create_proc_entry("rxbuf_cur", 0444, dir);
1442 if (!min || !max || !cur)
1443 goto nomem;
1445 min->read_proc = xennet_proc_read;
1446 min->write_proc = xennet_proc_write;
1447 min->data = (void *)((unsigned long)dev | TARGET_MIN);
1449 max->read_proc = xennet_proc_read;
1450 max->write_proc = xennet_proc_write;
1451 max->data = (void *)((unsigned long)dev | TARGET_MAX);
1453 cur->read_proc = xennet_proc_read;
1454 cur->write_proc = xennet_proc_write;
1455 cur->data = (void *)((unsigned long)dev | TARGET_CUR);
1457 return 0;
1459 nomem:
1460 xennet_proc_delif(dev);
1461 return -ENOMEM;
1462 }
1464 static void xennet_proc_delif(struct net_device *dev)
1465 {
1466 char name[30];
1468 sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
1469 remove_proc_entry(name, NULL);
1471 sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
1472 remove_proc_entry(name, NULL);
1474 sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
1475 remove_proc_entry(name, NULL);
1477 sprintf(name, "xen/net/%s", dev->name);
1478 remove_proc_entry(name, NULL);
1479 }
1481 #endif
1483 module_init(netif_init);
1484 module_exit(netif_exit);
1486 /*
1487 * Local variables:
1488 * c-file-style: "linux"
1489 * indent-tabs-mode: t
1490 * c-indent-level: 8
1491 * c-basic-offset: 8
1492 * tab-width: 8
1493 * End:
1494 */