ia64/xen-unstable

linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c @ 7566:8e1bfcb901e5

Once grant refs run out, netfront prints a nice message, but doesn't set
err properly to notify the caller, and the domU crashes. (See bug 183 for
details).

Signed-off-by: Jim Dykman <dykman@us.ibm.com>
author kaf24@firebug.cl.cam.ac.uk
date Sun Oct 30 16:43:05 2005 +0100 (2005-10-30)
parents 74d56b7ff46c
children 43676a509982
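
The bug described in the changeset comment above is visible in create_netdev()
further down: when gnttab_alloc_grant_references() fails, the function printed
its warning and jumped to the exit label without setting err, so the caller saw
a success return and went on to use a half-initialised device. A minimal sketch
of the error path as fixed by this changeset (simplified from the full function
below; only the tx case is shown):

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
					  &np->gref_tx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
		err = -ENOMEM;	/* previously missing: tell the caller we failed */
		goto exit;
	}

The rx allocation gets the same treatment, and additionally frees the
already-claimed tx references before bailing out.
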
line source
1 /******************************************************************************
2 * Virtual network driver for conversing with remote driver backends.
3 *
4 * Copyright (c) 2002-2005, K A Fraser
5 *
6 * This file may be distributed separately from the Linux kernel, or
7 * incorporated into other software packages, subject to the following license:
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a copy
10 * of this source file (the "Software"), to deal in the Software without
11 * restriction, including without limitation the rights to use, copy, modify,
12 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
13 * and to permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice shall be included in
17 * all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
28 #include <linux/config.h>
29 #include <linux/module.h>
30 #include <linux/version.h>
31 #include <linux/kernel.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/string.h>
35 #include <linux/errno.h>
36 #include <linux/netdevice.h>
37 #include <linux/inetdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/skbuff.h>
40 #include <linux/init.h>
41 #include <linux/bitops.h>
42 #include <linux/proc_fs.h>
43 #include <linux/ethtool.h>
44 #include <net/sock.h>
45 #include <net/pkt_sched.h>
46 #include <net/arp.h>
47 #include <net/route.h>
48 #include <asm/io.h>
49 #include <asm/uaccess.h>
50 #include <asm-xen/evtchn.h>
51 #include <asm-xen/xenbus.h>
52 #include <asm-xen/xen-public/io/netif.h>
53 #include <asm-xen/xen-public/memory.h>
54 #include <asm-xen/balloon.h>
55 #include <asm/page.h>
56 #include <asm/uaccess.h>
57 #include <asm-xen/xen-public/grant_table.h>
58 #include <asm-xen/gnttab.h>
60 #define GRANT_INVALID_REF 0
62 #ifndef __GFP_NOWARN
63 #define __GFP_NOWARN 0
64 #endif
65 #define alloc_xen_skb(_l) __dev_alloc_skb((_l), GFP_ATOMIC|__GFP_NOWARN)
67 #define init_skb_shinfo(_skb) \
68 do { \
69 atomic_set(&(skb_shinfo(_skb)->dataref), 1); \
70 skb_shinfo(_skb)->nr_frags = 0; \
71 skb_shinfo(_skb)->frag_list = NULL; \
72 } while (0)
74 /* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
75 #define RX_HEADROOM 200
77 /*
78 * If the backend driver is pipelining transmit requests then we can be very
79 * aggressive in avoiding new-packet notifications -- only need to send a
80 * notification if there are no outstanding unreceived responses.
81 * If the backend may be buffering our transmit buffers for any reason then we
82 * are rather more conservative.
83 */
84 #ifdef CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
85 #define TX_TEST_IDX resp_prod /* aggressive: any outstanding responses? */
86 #else
87 #define TX_TEST_IDX req_cons /* conservative: not seen all our requests? */
88 #endif
91 static void network_tx_buf_gc(struct net_device *dev);
92 static void network_alloc_rx_buffers(struct net_device *dev);
94 static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
95 static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
96 static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
98 #ifdef CONFIG_PROC_FS
99 static int xennet_proc_init(void);
100 static int xennet_proc_addif(struct net_device *dev);
101 static void xennet_proc_delif(struct net_device *dev);
102 #else
103 #define xennet_proc_init() (0)
104 #define xennet_proc_addif(d) (0)
105 #define xennet_proc_delif(d) ((void)0)
106 #endif
108 #define netfront_info net_private
109 struct net_private
110 {
111 struct list_head list;
112 struct net_device *netdev;
114 struct net_device_stats stats;
115 NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
116 unsigned int tx_full;
118 netif_tx_interface_t *tx;
119 netif_rx_interface_t *rx;
121 spinlock_t tx_lock;
122 spinlock_t rx_lock;
124 unsigned int handle;
125 unsigned int evtchn, irq;
127 /* What is the status of our connection to the remote backend? */
128 #define BEST_CLOSED 0
129 #define BEST_DISCONNECTED 1
130 #define BEST_CONNECTED 2
131 unsigned int backend_state;
133 /* Is this interface open or closed (down or up)? */
134 #define UST_CLOSED 0
135 #define UST_OPEN 1
136 unsigned int user_state;
138 /* Receive-ring batched refills. */
139 #define RX_MIN_TARGET 8
140 #define RX_MAX_TARGET NETIF_RX_RING_SIZE
141 int rx_min_target, rx_max_target, rx_target;
142 struct sk_buff_head rx_batch;
144 /*
145 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
146 * array is an index into a chain of free entries.
147 */
148 struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
149 struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
151 grant_ref_t gref_tx_head;
152 grant_ref_t grant_tx_ref[NETIF_TX_RING_SIZE + 1];
153 grant_ref_t gref_rx_head;
154 grant_ref_t grant_rx_ref[NETIF_TX_RING_SIZE + 1];
156 struct xenbus_device *xbdev;
157 char *backend;
158 int backend_id;
159 struct xenbus_watch watch;
160 int tx_ring_ref;
161 int rx_ring_ref;
162 u8 mac[ETH_ALEN];
163 };
165 /* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
166 #define ADD_ID_TO_FREELIST(_list, _id) \
167 (_list)[(_id)] = (_list)[0]; \
168 (_list)[0] = (void *)(unsigned long)(_id);
169 #define GET_ID_FROM_FREELIST(_list) \
170 ({ unsigned long _id = (unsigned long)(_list)[0]; \
171 (_list)[0] = (_list)[_id]; \
172 (unsigned short)_id; })
174 #ifdef DEBUG
175 static char *be_state_name[] = {
176 [BEST_CLOSED] = "closed",
177 [BEST_DISCONNECTED] = "disconnected",
178 [BEST_CONNECTED] = "connected",
179 };
180 #endif
182 #ifdef DEBUG
183 #define DPRINTK(fmt, args...) \
184 printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
185 #else
186 #define DPRINTK(fmt, args...) ((void)0)
187 #endif
188 #define IPRINTK(fmt, args...) \
189 printk(KERN_INFO "xen_net: " fmt, ##args)
190 #define WPRINTK(fmt, args...) \
191 printk(KERN_WARNING "xen_net: " fmt, ##args)
193 /** Send a packet on a net device to encourage switches to learn the
194 * MAC. We send a fake ARP request.
195 *
196 * @param dev device
197 * @return 0 on success, error code otherwise
198 */
199 static int send_fake_arp(struct net_device *dev)
200 {
201 struct sk_buff *skb;
202 u32 src_ip, dst_ip;
204 dst_ip = INADDR_BROADCAST;
205 src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
207 /* No IP? Then nothing to do. */
208 if (src_ip == 0)
209 return 0;
211 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
212 dst_ip, dev, src_ip,
213 /*dst_hw*/ NULL, /*src_hw*/ NULL,
214 /*target_hw*/ dev->dev_addr);
215 if (skb == NULL)
216 return -ENOMEM;
218 return dev_queue_xmit(skb);
219 }
221 static int network_open(struct net_device *dev)
222 {
223 struct net_private *np = netdev_priv(dev);
225 memset(&np->stats, 0, sizeof(np->stats));
227 np->user_state = UST_OPEN;
229 network_alloc_rx_buffers(dev);
230 np->rx->event = np->rx_resp_cons + 1;
232 netif_start_queue(dev);
234 return 0;
235 }
237 static void network_tx_buf_gc(struct net_device *dev)
238 {
239 NETIF_RING_IDX i, prod;
240 unsigned short id;
241 struct net_private *np = netdev_priv(dev);
242 struct sk_buff *skb;
244 if (np->backend_state != BEST_CONNECTED)
245 return;
247 do {
248 prod = np->tx->resp_prod;
249 rmb(); /* Ensure we see responses up to 'rp'. */
251 for (i = np->tx_resp_cons; i != prod; i++) {
252 id = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
253 skb = np->tx_skbs[id];
254 if (unlikely(gnttab_query_foreign_access(
255 np->grant_tx_ref[id]) != 0)) {
256 printk(KERN_ALERT "network_tx_buf_gc: warning "
257 "-- grant still in use by backend "
258 "domain.\n");
259 goto out;
260 }
261 gnttab_end_foreign_access_ref(
262 np->grant_tx_ref[id], GNTMAP_readonly);
263 gnttab_release_grant_reference(
264 &np->gref_tx_head, np->grant_tx_ref[id]);
265 np->grant_tx_ref[id] = GRANT_INVALID_REF;
266 ADD_ID_TO_FREELIST(np->tx_skbs, id);
267 dev_kfree_skb_irq(skb);
268 }
270 np->tx_resp_cons = prod;
272 /*
273 * Set a new event, then check for race with update of tx_cons.
274 * Note that it is essential to schedule a callback, no matter
275 * how few buffers are pending. Even if there is space in the
276 * transmit ring, higher layers may be blocked because too much
277 * data is outstanding: in such cases notification from Xen is
278 * likely to be the only kick that we'll get.
279 */
280 np->tx->event = prod + ((np->tx->req_prod - prod) >> 1) + 1;
281 mb();
282 } while (prod != np->tx->resp_prod);
284 out:
285 if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
286 np->tx_full = 0;
287 if (np->user_state == UST_OPEN)
288 netif_wake_queue(dev);
289 }
290 }
293 static void network_alloc_rx_buffers(struct net_device *dev)
294 {
295 unsigned short id;
296 struct net_private *np = netdev_priv(dev);
297 struct sk_buff *skb;
298 int i, batch_target;
299 NETIF_RING_IDX req_prod = np->rx->req_prod;
300 struct xen_memory_reservation reservation;
301 grant_ref_t ref;
303 if (unlikely(np->backend_state != BEST_CONNECTED))
304 return;
306 /*
307 * Allocate skbuffs greedily, even though we batch updates to the
308 * receive ring. This creates a less bursty demand on the memory
309 * allocator, so should reduce the chance of failed allocation requests
310 * both for ourself and for other kernel subsystems.
311 */
312 batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
313 for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
314 skb = alloc_xen_skb(dev->mtu + RX_HEADROOM);
315 if (skb == NULL)
316 break;
317 __skb_queue_tail(&np->rx_batch, skb);
318 }
320 /* Is the batch large enough to be worthwhile? */
321 if (i < (np->rx_target/2))
322 return;
324 for (i = 0; ; i++) {
325 if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
326 break;
328 skb->dev = dev;
330 id = GET_ID_FROM_FREELIST(np->rx_skbs);
332 np->rx_skbs[id] = skb;
334 np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
335 ref = gnttab_claim_grant_reference(&np->gref_rx_head);
336 BUG_ON((signed short)ref < 0);
337 np->grant_rx_ref[id] = ref;
338 gnttab_grant_foreign_transfer_ref(ref, np->backend_id);
339 np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref;
340 rx_pfn_array[i] = virt_to_mfn(skb->head);
342 /* Remove this page from map before passing back to Xen. */
343 phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT]
344 = INVALID_P2M_ENTRY;
346 MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head,
347 __pte(0), 0);
348 }
350 /* After all PTEs have been zapped we blow away stale TLB entries. */
351 rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
353 /* Give away a batch of pages. */
354 rx_mcl[i].op = __HYPERVISOR_memory_op;
355 rx_mcl[i].args[0] = XENMEM_decrease_reservation;
356 rx_mcl[i].args[1] = (unsigned long)&reservation;
358 reservation.extent_start = rx_pfn_array;
359 reservation.nr_extents = i;
360 reservation.extent_order = 0;
361 reservation.address_bits = 0;
362 reservation.domid = DOMID_SELF;
364 /* Tell the balloon driver what is going on. */
365 balloon_update_driver_allowance(i);
367 /* Zap PTEs and give away pages in one big multicall. */
368 (void)HYPERVISOR_multicall(rx_mcl, i+1);
370 /* Check return status of HYPERVISOR_memory_op(). */
371 if (unlikely(rx_mcl[i].result != i))
372 panic("Unable to reduce memory reservation\n");
374 /* Above is a suitable barrier to ensure backend will see requests. */
375 np->rx->req_prod = req_prod + i;
377 /* Adjust our fill target if we risked running out of buffers. */
378 if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
379 ((np->rx_target *= 2) > np->rx_max_target))
380 np->rx_target = np->rx_max_target;
381 }
384 static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
385 {
386 unsigned short id;
387 struct net_private *np = netdev_priv(dev);
388 netif_tx_request_t *tx;
389 NETIF_RING_IDX i;
390 grant_ref_t ref;
391 unsigned long mfn;
393 if (unlikely(np->tx_full)) {
394 printk(KERN_ALERT "%s: full queue wasn't stopped!\n",
395 dev->name);
396 netif_stop_queue(dev);
397 goto drop;
398 }
400 if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
401 PAGE_SIZE)) {
402 struct sk_buff *nskb;
403 if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
404 goto drop;
405 skb_put(nskb, skb->len);
406 memcpy(nskb->data, skb->data, skb->len);
407 nskb->dev = skb->dev;
408 dev_kfree_skb(skb);
409 skb = nskb;
410 }
412 spin_lock_irq(&np->tx_lock);
414 if (np->backend_state != BEST_CONNECTED) {
415 spin_unlock_irq(&np->tx_lock);
416 goto drop;
417 }
419 i = np->tx->req_prod;
421 id = GET_ID_FROM_FREELIST(np->tx_skbs);
422 np->tx_skbs[id] = skb;
424 tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
426 tx->id = id;
427 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
428 BUG_ON((signed short)ref < 0);
429 mfn = virt_to_mfn(skb->data);
430 gnttab_grant_foreign_access_ref(
431 ref, np->backend_id, mfn, GNTMAP_readonly);
432 tx->gref = np->grant_tx_ref[id] = ref;
433 tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
434 tx->size = skb->len;
435 tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
437 wmb(); /* Ensure that backend will see the request. */
438 np->tx->req_prod = i + 1;
440 network_tx_buf_gc(dev);
442 if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
443 np->tx_full = 1;
444 netif_stop_queue(dev);
445 }
447 spin_unlock_irq(&np->tx_lock);
449 np->stats.tx_bytes += skb->len;
450 np->stats.tx_packets++;
452 /* Only notify Xen if we really have to. */
453 mb();
454 if (np->tx->TX_TEST_IDX == i)
455 notify_remote_via_irq(np->irq);
457 return 0;
459 drop:
460 np->stats.tx_dropped++;
461 dev_kfree_skb(skb);
462 return 0;
463 }
465 static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
466 {
467 struct net_device *dev = dev_id;
468 struct net_private *np = netdev_priv(dev);
469 unsigned long flags;
471 spin_lock_irqsave(&np->tx_lock, flags);
472 network_tx_buf_gc(dev);
473 spin_unlock_irqrestore(&np->tx_lock, flags);
475 if ((np->rx_resp_cons != np->rx->resp_prod) &&
476 (np->user_state == UST_OPEN))
477 netif_rx_schedule(dev);
479 return IRQ_HANDLED;
480 }
483 static int netif_poll(struct net_device *dev, int *pbudget)
484 {
485 struct net_private *np = netdev_priv(dev);
486 struct sk_buff *skb, *nskb;
487 netif_rx_response_t *rx;
488 NETIF_RING_IDX i, rp;
489 mmu_update_t *mmu = rx_mmu;
490 multicall_entry_t *mcl = rx_mcl;
491 int work_done, budget, more_to_do = 1;
492 struct sk_buff_head rxq;
493 unsigned long flags;
494 unsigned long mfn;
495 grant_ref_t ref;
497 spin_lock(&np->rx_lock);
499 if (np->backend_state != BEST_CONNECTED) {
500 spin_unlock(&np->rx_lock);
501 return 0;
502 }
504 skb_queue_head_init(&rxq);
506 if ((budget = *pbudget) > dev->quota)
507 budget = dev->quota;
508 rp = np->rx->resp_prod;
509 rmb(); /* Ensure we see queued responses up to 'rp'. */
511 for (i = np->rx_resp_cons, work_done = 0;
512 (i != rp) && (work_done < budget);
513 i++, work_done++) {
514 rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
515 /*
516 * An error here is very odd. Usually indicates a backend bug,
517 * low-mem condition, or we didn't have reservation headroom.
518 */
519 if (unlikely(rx->status <= 0)) {
520 if (net_ratelimit())
521 printk(KERN_WARNING "Bad rx buffer "
522 "(memory squeeze?).\n");
523 np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].
524 req.id = rx->id;
525 wmb();
526 np->rx->req_prod++;
527 work_done--;
528 continue;
529 }
531 ref = np->grant_rx_ref[rx->id];
533 if(ref == GRANT_INVALID_REF) {
534 printk(KERN_WARNING "Bad rx grant reference %d "
535 "from dom %d.\n",
536 ref, np->backend_id);
537 np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].
538 req.id = rx->id;
539 wmb();
540 np->rx->req_prod++;
541 work_done--;
542 continue;
543 }
545 np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
546 mfn = gnttab_end_foreign_transfer_ref(ref);
547 gnttab_release_grant_reference(&np->gref_rx_head, ref);
549 skb = np->rx_skbs[rx->id];
550 ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
552 /* NB. We handle skb overflow later. */
553 skb->data = skb->head + rx->offset;
554 skb->len = rx->status;
555 skb->tail = skb->data + skb->len;
557 if ( rx->csum_valid )
558 skb->ip_summed = CHECKSUM_UNNECESSARY;
560 np->stats.rx_packets++;
561 np->stats.rx_bytes += rx->status;
563 /* Remap the page. */
564 mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
565 mmu->val = __pa(skb->head) >> PAGE_SHIFT;
566 mmu++;
567 MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
568 pfn_pte_ma(mfn, PAGE_KERNEL), 0);
569 mcl++;
571 phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = mfn;
573 __skb_queue_tail(&rxq, skb);
574 }
576 /* Some pages are no longer absent... */
577 balloon_update_driver_allowance(-work_done);
579 /* Do all the remapping work, and M2P updates, in one big hypercall. */
580 if (likely((mcl - rx_mcl) != 0)) {
581 mcl->op = __HYPERVISOR_mmu_update;
582 mcl->args[0] = (unsigned long)rx_mmu;
583 mcl->args[1] = mmu - rx_mmu;
584 mcl->args[2] = 0;
585 mcl->args[3] = DOMID_SELF;
586 mcl++;
587 (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
588 }
590 while ((skb = __skb_dequeue(&rxq)) != NULL) {
591 /*
592 * Enough room in skbuff for the data we were passed? Also,
593 * Linux expects at least 16 bytes headroom in each rx buffer.
594 */
595 if (unlikely(skb->tail > skb->end) ||
596 unlikely((skb->data - skb->head) < 16)) {
597 nskb = NULL;
599 /* Only copy the packet if it fits in the MTU. */
600 if (skb->len <= (dev->mtu + ETH_HLEN)) {
601 if ((skb->tail > skb->end) && net_ratelimit())
602 printk(KERN_INFO "Received packet "
603 "needs %zd bytes more "
604 "headroom.\n",
605 skb->tail - skb->end);
607 nskb = alloc_xen_skb(skb->len + 2);
608 if (nskb != NULL) {
609 skb_reserve(nskb, 2);
610 skb_put(nskb, skb->len);
611 memcpy(nskb->data,
612 skb->data,
613 skb->len);
614 nskb->dev = skb->dev;
615 }
616 }
617 else if (net_ratelimit())
618 printk(KERN_INFO "Received packet too big for "
619 "MTU (%d > %d)\n",
620 skb->len - ETH_HLEN, dev->mtu);
622 /* Reinitialise and then destroy the old skbuff. */
623 skb->len = 0;
624 skb->tail = skb->data;
625 init_skb_shinfo(skb);
626 dev_kfree_skb(skb);
628 /* Switch old for new, if we copied the buffer. */
629 if ((skb = nskb) == NULL)
630 continue;
631 }
633 /* Set the shinfo area, which is hidden behind the data. */
634 init_skb_shinfo(skb);
635 /* Ethernet work: Delayed to here as it peeks the header. */
636 skb->protocol = eth_type_trans(skb, dev);
638 /* Pass it up. */
639 netif_receive_skb(skb);
640 dev->last_rx = jiffies;
641 }
643 np->rx_resp_cons = i;
645 /* If we get a callback with very few responses, reduce fill target. */
646 /* NB. Note exponential increase, linear decrease. */
647 if (((np->rx->req_prod - np->rx->resp_prod) >
648 ((3*np->rx_target) / 4)) &&
649 (--np->rx_target < np->rx_min_target))
650 np->rx_target = np->rx_min_target;
652 network_alloc_rx_buffers(dev);
654 *pbudget -= work_done;
655 dev->quota -= work_done;
657 if (work_done < budget) {
658 local_irq_save(flags);
660 np->rx->event = i + 1;
662 /* Deal with hypervisor racing our resetting of rx_event. */
663 mb();
664 if (np->rx->resp_prod == i) {
665 __netif_rx_complete(dev);
666 more_to_do = 0;
667 }
669 local_irq_restore(flags);
670 }
672 spin_unlock(&np->rx_lock);
674 return more_to_do;
675 }
678 static int network_close(struct net_device *dev)
679 {
680 struct net_private *np = netdev_priv(dev);
681 np->user_state = UST_CLOSED;
682 netif_stop_queue(np->netdev);
683 return 0;
684 }
687 static struct net_device_stats *network_get_stats(struct net_device *dev)
688 {
689 struct net_private *np = netdev_priv(dev);
690 return &np->stats;
691 }
693 static void network_connect(struct net_device *dev)
694 {
695 struct net_private *np;
696 int i, requeue_idx;
697 netif_tx_request_t *tx;
698 struct sk_buff *skb;
700 np = netdev_priv(dev);
701 spin_lock_irq(&np->tx_lock);
702 spin_lock(&np->rx_lock);
704 /* Recovery procedure: */
706 /* Step 1: Reinitialise variables. */
707 np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
708 np->rx->event = np->tx->event = 1;
710 /*
711 * Step 2: Rebuild the RX and TX ring contents.
712 * NB. We could just free the queued TX packets now but we hope
713 * that sending them out might do some good. We have to rebuild
714 * the RX ring because some of our pages are currently flipped out
715 * so we can't just free the RX skbs.
716 * NB2. Freelist index entries are always going to be less than
717 * __PAGE_OFFSET, whereas pointers to skbs will always be equal or
718 * greater than __PAGE_OFFSET: we use this property to distinguish
719 * them.
720 */
722 /*
723 * Rebuild the TX buffer freelist and the TX ring itself.
724 * NB. This reorders packets. We could keep more private state
725 * to avoid this but maybe it doesn't matter so much given the
726 * interface has been down.
727 */
728 for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
729 if ((unsigned long)np->tx_skbs[i] < __PAGE_OFFSET)
730 continue;
732 skb = np->tx_skbs[i];
734 tx = &np->tx->ring[requeue_idx++].req;
736 tx->id = i;
737 gnttab_grant_foreign_access_ref(
738 np->grant_tx_ref[i], np->backend_id,
739 virt_to_mfn(np->tx_skbs[i]->data),
740 GNTMAP_readonly);
741 tx->gref = np->grant_tx_ref[i];
742 tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
743 tx->size = skb->len;
744 tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
746 np->stats.tx_bytes += skb->len;
747 np->stats.tx_packets++;
748 }
749 wmb();
750 np->tx->req_prod = requeue_idx;
752 /* Rebuild the RX buffer freelist and the RX ring itself. */
753 for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) {
754 if ((unsigned long)np->rx_skbs[i] < __PAGE_OFFSET)
755 continue;
756 gnttab_grant_foreign_transfer_ref(
757 np->grant_rx_ref[i], np->backend_id);
758 np->rx->ring[requeue_idx].req.gref =
759 np->grant_rx_ref[i];
760 np->rx->ring[requeue_idx].req.id = i;
761 requeue_idx++;
762 }
763 wmb();
764 np->rx->req_prod = requeue_idx;
766 /*
767 * Step 3: All public and private state should now be sane. Get
768 * ready to start sending and receiving packets and give the driver
769 * domain a kick because we've probably just requeued some
770 * packets.
771 */
772 np->backend_state = BEST_CONNECTED;
773 wmb();
774 notify_remote_via_irq(np->irq);
775 network_tx_buf_gc(dev);
777 if (np->user_state == UST_OPEN)
778 netif_start_queue(dev);
780 spin_unlock(&np->rx_lock);
781 spin_unlock_irq(&np->tx_lock);
782 }
784 static void show_device(struct net_private *np)
785 {
786 #ifdef DEBUG
787 if (np) {
788 IPRINTK("<vif handle=%u %s(%s) evtchn=%u tx=%p rx=%p>\n",
789 np->handle,
790 be_state_name[np->backend_state],
791 np->user_state ? "open" : "closed",
792 np->evtchn,
793 np->tx,
794 np->rx);
795 } else {
796 IPRINTK("<vif NULL>\n");
797 }
798 #endif
799 }
801 /*
802 * Move the vif into connected state.
803 * Sets the mac and event channel from the message.
804 * Binds the irq to the event channel.
805 */
806 static void
807 connect_device(struct net_private *np, unsigned int evtchn)
808 {
809 struct net_device *dev = np->netdev;
810 memcpy(dev->dev_addr, np->mac, ETH_ALEN);
811 np->evtchn = evtchn;
812 network_connect(dev);
813 np->irq = bind_evtchn_to_irqhandler(
814 np->evtchn, netif_int, SA_SAMPLE_RANDOM, dev->name, dev);
815 (void)send_fake_arp(dev);
816 show_device(np);
817 }
819 static void netif_uninit(struct net_device *dev)
820 {
821 struct net_private *np = netdev_priv(dev);
822 gnttab_free_grant_references(np->gref_tx_head);
823 gnttab_free_grant_references(np->gref_rx_head);
824 }
826 static struct ethtool_ops network_ethtool_ops =
827 {
828 .get_tx_csum = ethtool_op_get_tx_csum,
829 .set_tx_csum = ethtool_op_set_tx_csum,
830 };
832 /** Create a network device.
833 * @param handle device handle
834 * @param val return parameter for created device
835 * @return 0 on success, error code otherwise
836 */
837 static int create_netdev(int handle, struct xenbus_device *dev,
838 struct net_device **val)
839 {
840 int i, err = 0;
841 struct net_device *netdev = NULL;
842 struct net_private *np = NULL;
844 if ((netdev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
845 printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
846 __FUNCTION__);
847 err = -ENOMEM;
848 goto exit;
849 }
851 np = netdev_priv(netdev);
852 np->backend_state = BEST_CLOSED;
853 np->user_state = UST_CLOSED;
854 np->handle = handle;
855 np->xbdev = dev;
857 spin_lock_init(&np->tx_lock);
858 spin_lock_init(&np->rx_lock);
860 skb_queue_head_init(&np->rx_batch);
861 np->rx_target = RX_MIN_TARGET;
862 np->rx_min_target = RX_MIN_TARGET;
863 np->rx_max_target = RX_MAX_TARGET;
865 /* Initialise {tx,rx}_skbs as a free chain containing every entry. */
866 for (i = 0; i <= NETIF_TX_RING_SIZE; i++) {
867 np->tx_skbs[i] = (void *)((unsigned long) i+1);
868 np->grant_tx_ref[i] = GRANT_INVALID_REF;
869 }
871 for (i = 0; i <= NETIF_RX_RING_SIZE; i++) {
872 np->rx_skbs[i] = (void *)((unsigned long) i+1);
873 np->grant_rx_ref[i] = GRANT_INVALID_REF;
874 }
876 /* A grant for every tx ring slot */
877 if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
878 &np->gref_tx_head) < 0) {
879 printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
880 err = -ENOMEM;
881 goto exit;
882 }
883 /* A grant for every rx ring slot */
884 if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE,
885 &np->gref_rx_head) < 0) {
886 printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
887 gnttab_free_grant_references(np->gref_tx_head);
888 err = -ENOMEM;
889 goto exit;
890 }
892 netdev->open = network_open;
893 netdev->hard_start_xmit = network_start_xmit;
894 netdev->stop = network_close;
895 netdev->get_stats = network_get_stats;
896 netdev->poll = netif_poll;
897 netdev->uninit = netif_uninit;
898 netdev->weight = 64;
899 netdev->features = NETIF_F_IP_CSUM;
901 SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
902 SET_MODULE_OWNER(netdev);
903 SET_NETDEV_DEV(netdev, &dev->dev);
905 if ((err = register_netdev(netdev)) != 0) {
906 printk(KERN_WARNING "%s> register_netdev err=%d\n",
907 __FUNCTION__, err);
908 goto exit_free_grefs;
909 }
911 if ((err = xennet_proc_addif(netdev)) != 0) {
912 unregister_netdev(netdev);
913 goto exit_free_grefs;
914 }
916 np->netdev = netdev;
918 exit:
919 if ((err != 0) && (netdev != NULL))
920 kfree(netdev);
921 else if (val != NULL)
922 *val = netdev;
923 return err;
925 exit_free_grefs:
926 gnttab_free_grant_references(np->gref_tx_head);
927 gnttab_free_grant_references(np->gref_rx_head);
928 goto exit;
929 }
931 static int destroy_netdev(struct net_device *netdev)
932 {
933 #ifdef CONFIG_PROC_FS
934 xennet_proc_delif(netdev);
935 #endif
936 unregister_netdev(netdev);
937 return 0;
938 }
940 /*
941 * We use this notifier to send out a fake ARP reply to reset switches and
942 * router ARP caches when an IP interface is brought up on a VIF.
943 */
944 static int
945 inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
946 {
947 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
948 struct net_device *dev = ifa->ifa_dev->dev;
950 /* UP event and is it one of our devices? */
951 if (event == NETDEV_UP && dev->open == network_open)
952 (void)send_fake_arp(dev);
954 return NOTIFY_DONE;
955 }
957 static struct notifier_block notifier_inetdev = {
958 .notifier_call = inetdev_notify,
959 .next = NULL,
960 .priority = 0
961 };
963 static struct xenbus_device_id netfront_ids[] = {
964 { "vif" },
965 { "" }
966 };
968 static void watch_for_status(struct xenbus_watch *watch,
969 const char **vec, unsigned int len)
970 {
971 }
973 static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
974 {
975 int err;
976 evtchn_op_t op = {
977 .cmd = EVTCHNOP_alloc_unbound,
978 .u.alloc_unbound.dom = DOMID_SELF,
979 .u.alloc_unbound.remote_dom = info->backend_id };
981 info->tx_ring_ref = GRANT_INVALID_REF;
982 info->rx_ring_ref = GRANT_INVALID_REF;
984 info->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
985 if (info->tx == 0) {
986 err = -ENOMEM;
987 xenbus_dev_error(dev, err, "allocating tx ring page");
988 goto out;
989 }
990 info->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
991 if (info->rx == 0) {
992 err = -ENOMEM;
993 xenbus_dev_error(dev, err, "allocating rx ring page");
994 goto out;
995 }
996 memset(info->tx, 0, PAGE_SIZE);
997 memset(info->rx, 0, PAGE_SIZE);
998 info->backend_state = BEST_DISCONNECTED;
1000 err = gnttab_grant_foreign_access(info->backend_id,
1001 virt_to_mfn(info->tx), 0);
1002 if (err < 0) {
1003 xenbus_dev_error(dev, err, "granting access to tx ring page");
1004 goto out;
1005 }
1006 info->tx_ring_ref = err;
1008 err = gnttab_grant_foreign_access(info->backend_id,
1009 virt_to_mfn(info->rx), 0);
1010 if (err < 0) {
1011 xenbus_dev_error(dev, err, "granting access to rx ring page");
1012 goto out;
1013 }
1014 info->rx_ring_ref = err;
1016 err = HYPERVISOR_event_channel_op(&op);
1017 if (err) {
1018 xenbus_dev_error(dev, err, "allocating event channel");
1019 goto out;
1020 }
1022 connect_device(info, op.u.alloc_unbound.port);
1024 return 0;
1026 out:
1027 if (info->tx)
1028 free_page((unsigned long)info->tx);
1029 info->tx = 0;
1030 if (info->rx)
1031 free_page((unsigned long)info->rx);
1032 info->rx = 0;
1034 if (info->tx_ring_ref != GRANT_INVALID_REF)
1035 gnttab_end_foreign_access(info->tx_ring_ref, 0);
1036 info->tx_ring_ref = GRANT_INVALID_REF;
1038 if (info->rx_ring_ref != GRANT_INVALID_REF)
1039 gnttab_end_foreign_access(info->rx_ring_ref, 0);
1040 info->rx_ring_ref = GRANT_INVALID_REF;
1042 return err;
1043 }
1045 static void netif_free(struct netfront_info *info)
1046 {
1047 if (info->tx)
1048 free_page((unsigned long)info->tx);
1049 info->tx = 0;
1050 if (info->rx)
1051 free_page((unsigned long)info->rx);
1052 info->rx = 0;
1054 if (info->tx_ring_ref != GRANT_INVALID_REF)
1055 gnttab_end_foreign_access(info->tx_ring_ref, 0);
1056 info->tx_ring_ref = GRANT_INVALID_REF;
1058 if (info->rx_ring_ref != GRANT_INVALID_REF)
1059 gnttab_end_foreign_access(info->rx_ring_ref, 0);
1060 info->rx_ring_ref = GRANT_INVALID_REF;
1062 if (info->irq)
1063 unbind_evtchn_from_irqhandler(info->irq, info->netdev);
1064 info->evtchn = info->irq = 0;
1065 }
1067 /* Stop network device and free tx/rx queues and irq. */
1068 static void shutdown_device(struct net_private *np)
1069 {
1070 /* Stop old i/f to prevent errors whilst we rebuild the state. */
1071 spin_lock_irq(&np->tx_lock);
1072 spin_lock(&np->rx_lock);
1073 netif_stop_queue(np->netdev);
1074 /* np->backend_state = BEST_DISCONNECTED; */
1075 spin_unlock(&np->rx_lock);
1076 spin_unlock_irq(&np->tx_lock);
1078 /* Free resources. */
1079 netif_free(np);
1080 }
1082 /* Common code used when first setting up, and when resuming. */
1083 static int talk_to_backend(struct xenbus_device *dev,
1084 struct netfront_info *info)
1085 {
1086 char *backend, *mac, *e, *s;
1087 const char *message;
1088 struct xenbus_transaction *xbt;
1089 int err, i;
1091 backend = NULL;
1092 err = xenbus_gather(NULL, dev->nodename,
1093 "backend-id", "%i", &info->backend_id,
1094 "backend", NULL, &backend,
1095 NULL);
1096 if (XENBUS_EXIST_ERR(err))
1097 goto out;
1098 if (backend && strlen(backend) == 0) {
1099 err = -ENOENT;
1100 goto out;
1101 }
1102 if (err < 0) {
1103 xenbus_dev_error(dev, err, "reading %s/backend or backend-id",
1104 dev->nodename);
1105 goto out;
1106 }
1108 mac = xenbus_read(NULL, dev->nodename, "mac", NULL);
1109 if (IS_ERR(mac)) {
1110 err = PTR_ERR(mac);
1111 xenbus_dev_error(dev, err, "reading %s/mac",
1112 dev->nodename);
1113 goto out;
1114 }
1115 s = mac;
1116 for (i = 0; i < ETH_ALEN; i++) {
1117 info->mac[i] = simple_strtoul(s, &e, 16);
1118 if (s == e || (e[0] != ':' && e[0] != 0)) {
1119 kfree(mac);
1120 err = -ENOENT;
1121 xenbus_dev_error(dev, err, "parsing %s/mac",
1122 dev->nodename);
1123 goto out;
1124 }
1125 s = &e[1];
1126 }
1127 kfree(mac);
1129 /* Create shared ring, alloc event channel. */
1130 err = setup_device(dev, info);
1131 if (err) {
1132 xenbus_dev_error(dev, err, "setting up ring");
1133 goto out;
1134 }
1136 again:
1137 xbt = xenbus_transaction_start();
1138 if (IS_ERR(xbt)) {
1139 xenbus_dev_error(dev, err, "starting transaction");
1140 goto destroy_ring;
1141 }
1143 err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
1144 info->tx_ring_ref);
1145 if (err) {
1146 message = "writing tx ring-ref";
1147 goto abort_transaction;
1148 }
1149 err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
1150 info->rx_ring_ref);
1151 if (err) {
1152 message = "writing rx ring-ref";
1153 goto abort_transaction;
1154 }
1155 err = xenbus_printf(xbt, dev->nodename,
1156 "event-channel", "%u", info->evtchn);
1157 if (err) {
1158 message = "writing event-channel";
1159 goto abort_transaction;
1160 }
1162 err = xenbus_transaction_end(xbt, 0);
1163 if (err) {
1164 if (err == -EAGAIN)
1165 goto again;
1166 xenbus_dev_error(dev, err, "completing transaction");
1167 goto destroy_ring;
1168 }
1170 info->watch.node = backend;
1171 info->watch.callback = watch_for_status;
1172 err = register_xenbus_watch(&info->watch);
1173 if (err) {
1174 message = "registering watch on backend";
1175 goto destroy_ring;
1176 }
1178 info->backend = backend;
1180 return 0;
1182 abort_transaction:
1183 xenbus_transaction_end(xbt, 1);
1184 xenbus_dev_error(dev, err, "%s", message);
1185 destroy_ring:
1186 shutdown_device(info);
1187 out:
1188 if (backend)
1189 kfree(backend);
1190 return err;
1191 }
1193 /*
1194 * Setup supplies the backend dir, virtual device.
1195 * We place an event channel and shared frame entries.
1196 * We watch backend to wait if it's ok.
1197 */
1198 static int netfront_probe(struct xenbus_device *dev,
1199 const struct xenbus_device_id *id)
1200 {
1201 int err;
1202 struct net_device *netdev;
1203 struct netfront_info *info;
1204 unsigned int handle;
1206 err = xenbus_scanf(NULL, dev->nodename, "handle", "%u", &handle);
1207 if (XENBUS_EXIST_ERR(err))
1208 return err;
1209 if (err < 0) {
1210 xenbus_dev_error(dev, err, "reading handle");
1211 return err;
1212 }
1214 err = create_netdev(handle, dev, &netdev);
1215 if (err) {
1216 xenbus_dev_error(dev, err, "creating netdev");
1217 return err;
1218 }
1220 info = netdev_priv(netdev);
1221 dev->data = info;
1223 err = talk_to_backend(dev, info);
1224 if (err) {
1225 destroy_netdev(netdev);
1226 kfree(netdev);
1227 dev->data = NULL;
1228 return err;
1229 }
1231 return 0;
1232 }
1234 static int netfront_remove(struct xenbus_device *dev)
1235 {
1236 struct netfront_info *info = dev->data;
1238 if (info->backend)
1239 unregister_xenbus_watch(&info->watch);
1241 netif_free(info);
1243 kfree(info->backend);
1244 kfree(info);
1246 return 0;
1247 }
1249 static int netfront_suspend(struct xenbus_device *dev)
1250 {
1251 struct netfront_info *info = dev->data;
1252 unregister_xenbus_watch(&info->watch);
1253 kfree(info->backend);
1254 info->backend = NULL;
1255 return 0;
1256 }
1258 static int netfront_resume(struct xenbus_device *dev)
1259 {
1260 struct netfront_info *info = dev->data;
1261 netif_free(info);
1262 return talk_to_backend(dev, info);
1263 }
1265 static struct xenbus_driver netfront = {
1266 .name = "vif",
1267 .owner = THIS_MODULE,
1268 .ids = netfront_ids,
1269 .probe = netfront_probe,
1270 .remove = netfront_remove,
1271 .resume = netfront_resume,
1272 .suspend = netfront_suspend,
1273 };
1275 static void __init init_net_xenbus(void)
1276 {
1277 xenbus_register_driver(&netfront);
1278 }
1280 static int __init netif_init(void)
1281 {
1282 int err = 0;
1284 if (xen_start_info->flags & SIF_INITDOMAIN)
1285 return 0;
1287 if ((err = xennet_proc_init()) != 0)
1288 return err;
1290 IPRINTK("Initialising virtual ethernet driver.\n");
1292 (void)register_inetaddr_notifier(&notifier_inetdev);
1294 init_net_xenbus();
1296 return err;
1297 }
1299 static void netif_exit(void)
1300 {
1301 }
1303 #ifdef CONFIG_PROC_FS
1305 #define TARGET_MIN 0UL
1306 #define TARGET_MAX 1UL
1307 #define TARGET_CUR 2UL
1309 static int xennet_proc_read(
1310 char *page, char **start, off_t off, int count, int *eof, void *data)
1311 {
1312 struct net_device *dev =
1313 (struct net_device *)((unsigned long)data & ~3UL);
1314 struct net_private *np = netdev_priv(dev);
1315 int len = 0, which_target = (long)data & 3;
1317 switch (which_target)
1318 {
1319 case TARGET_MIN:
1320 len = sprintf(page, "%d\n", np->rx_min_target);
1321 break;
1322 case TARGET_MAX:
1323 len = sprintf(page, "%d\n", np->rx_max_target);
1324 break;
1325 case TARGET_CUR:
1326 len = sprintf(page, "%d\n", np->rx_target);
1327 break;
1328 }
1330 *eof = 1;
1331 return len;
1332 }
1334 static int xennet_proc_write(
1335 struct file *file, const char __user *buffer,
1336 unsigned long count, void *data)
1337 {
1338 struct net_device *dev =
1339 (struct net_device *)((unsigned long)data & ~3UL);
1340 struct net_private *np = netdev_priv(dev);
1341 int which_target = (long)data & 3;
1342 char string[64];
1343 long target;
1345 if (!capable(CAP_SYS_ADMIN))
1346 return -EPERM;
1348 if (count <= 1)
1349 return -EBADMSG; /* runt */
1350 if (count > sizeof(string))
1351 return -EFBIG; /* too long */
1353 if (copy_from_user(string, buffer, count))
1354 return -EFAULT;
1355 string[sizeof(string)-1] = '\0';
1357 target = simple_strtol(string, NULL, 10);
1358 if (target < RX_MIN_TARGET)
1359 target = RX_MIN_TARGET;
1360 if (target > RX_MAX_TARGET)
1361 target = RX_MAX_TARGET;
1363 spin_lock(&np->rx_lock);
1365 switch (which_target)
1366 {
1367 case TARGET_MIN:
1368 if (target > np->rx_max_target)
1369 np->rx_max_target = target;
1370 np->rx_min_target = target;
1371 if (target > np->rx_target)
1372 np->rx_target = target;
1373 break;
1374 case TARGET_MAX:
1375 if (target < np->rx_min_target)
1376 np->rx_min_target = target;
1377 np->rx_max_target = target;
1378 if (target < np->rx_target)
1379 np->rx_target = target;
1380 break;
1381 case TARGET_CUR:
1382 break;
1383 }
1385 network_alloc_rx_buffers(dev);
1387 spin_unlock(&np->rx_lock);
1389 return count;
1390 }
1392 static int xennet_proc_init(void)
1393 {
1394 if (proc_mkdir("xen/net", NULL) == NULL)
1395 return -ENOMEM;
1396 return 0;
1397 }
1399 static int xennet_proc_addif(struct net_device *dev)
1400 {
1401 struct proc_dir_entry *dir, *min, *max, *cur;
1402 char name[30];
1404 sprintf(name, "xen/net/%s", dev->name);
1406 dir = proc_mkdir(name, NULL);
1407 if (!dir)
1408 goto nomem;
1410 min = create_proc_entry("rxbuf_min", 0644, dir);
1411 max = create_proc_entry("rxbuf_max", 0644, dir);
1412 cur = create_proc_entry("rxbuf_cur", 0444, dir);
1413 if (!min || !max || !cur)
1414 goto nomem;
1416 min->read_proc = xennet_proc_read;
1417 min->write_proc = xennet_proc_write;
1418 min->data = (void *)((unsigned long)dev | TARGET_MIN);
1420 max->read_proc = xennet_proc_read;
1421 max->write_proc = xennet_proc_write;
1422 max->data = (void *)((unsigned long)dev | TARGET_MAX);
1424 cur->read_proc = xennet_proc_read;
1425 cur->write_proc = xennet_proc_write;
1426 cur->data = (void *)((unsigned long)dev | TARGET_CUR);
1428 return 0;
1430 nomem:
1431 xennet_proc_delif(dev);
1432 return -ENOMEM;
1433 }
1435 static void xennet_proc_delif(struct net_device *dev)
1436 {
1437 char name[30];
1439 sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
1440 remove_proc_entry(name, NULL);
1442 sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
1443 remove_proc_entry(name, NULL);
1445 sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
1446 remove_proc_entry(name, NULL);
1448 sprintf(name, "xen/net/%s", dev->name);
1449 remove_proc_entry(name, NULL);
1450 }
1452 #endif
1454 module_init(netif_init);
1455 module_exit(netif_exit);
1457 /*
1458 * Local variables:
1459 * c-file-style: "linux"
1460 * indent-tabs-mode: t
1461 * c-indent-level: 8
1462 * c-basic-offset: 8
1463 * tab-width: 8
1464 * End:
1465 */