ia64/xen-unstable

view linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c @ 10086:21bd82ade5cc

[NET] front: Remove tx_full and unnecessary queue operations

The tx_full variable merely mirrors information already present in
the XOFF bit on the net device. The net device architecture itself
is quite mature and can be trusted by Xen to maintain its state
correctly.

Also, it is pointless to stop the queue in close_netdev: the queue can
be woken up again anyway by a softirq running on another CPU, and
unregister_netdev handles all of this in any case.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
author kaf24@firebug.cl.cam.ac.uk
date Thu May 18 09:38:37 2006 +0100 (2006-05-18)
parents 91c77df11b43
children 64fbdbc31dba
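
A minimal sketch of the queue-state pattern the commit message relies on, using only the standard net-device calls netif_stop_queue(), netif_wake_queue() and netif_queue_stopped(); ring_full and ring_has_room are hypothetical stand-ins for the driver's real ring-space checks (e.g. RING_FULL(&np->tx) and the test at the end of network_tx_buf_gc()):

#include <linux/netdevice.h>

/*
 * Transmit path: stop the queue when the shared ring has no room left.
 * netif_stop_queue() sets the device's XOFF bit, so no private tx_full
 * flag is needed to remember that the queue is stopped.
 */
static void tx_maybe_stop(struct net_device *dev, int ring_full)
{
	if (ring_full)
		netif_stop_queue(dev);
}

/*
 * TX-completion path: the XOFF bit already records the stopped state,
 * so test it with netif_queue_stopped() and wake the queue once the
 * ring has room again.
 */
static void tx_maybe_wake(struct net_device *dev, int ring_has_room)
{
	if (netif_queue_stopped(dev) && ring_has_room)
		netif_wake_queue(dev);
}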
line source
1 /******************************************************************************
2 * Virtual network driver for conversing with remote driver backends.
3 *
4 * Copyright (c) 2002-2005, K A Fraser
5 * Copyright (c) 2005, XenSource Ltd
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation; or, when distributed
10 * separately from the Linux kernel or incorporated into other
11 * software packages, subject to the following license:
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this source file (the "Software"), to deal in the Software without
15 * restriction, including without limitation the rights to use, copy, modify,
16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17 * and to permit persons to whom the Software is furnished to do so, subject to
18 * the following conditions:
19 *
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29 * IN THE SOFTWARE.
30 */
32 #include <linux/config.h>
33 #include <linux/module.h>
34 #include <linux/version.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/string.h>
39 #include <linux/errno.h>
40 #include <linux/netdevice.h>
41 #include <linux/inetdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/skbuff.h>
44 #include <linux/init.h>
45 #include <linux/bitops.h>
46 #include <linux/proc_fs.h>
47 #include <linux/ethtool.h>
48 #include <linux/in.h>
49 #include <net/sock.h>
50 #include <net/pkt_sched.h>
51 #include <net/arp.h>
52 #include <net/route.h>
53 #include <asm/io.h>
54 #include <asm/uaccess.h>
55 #include <xen/evtchn.h>
56 #include <xen/xenbus.h>
57 #include <xen/interface/io/netif.h>
58 #include <xen/interface/memory.h>
59 #include <xen/balloon.h>
60 #include <asm/page.h>
61 #include <asm/uaccess.h>
62 #include <xen/interface/grant_table.h>
63 #include <xen/gnttab.h>
64 #include <xen/net_driver_util.h>
66 #define GRANT_INVALID_REF 0
68 #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
69 #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
71 static inline void init_skb_shinfo(struct sk_buff *skb)
72 {
73 atomic_set(&(skb_shinfo(skb)->dataref), 1);
74 skb_shinfo(skb)->nr_frags = 0;
75 skb_shinfo(skb)->frag_list = NULL;
76 }
78 struct netfront_info
79 {
80 struct list_head list;
81 struct net_device *netdev;
83 struct net_device_stats stats;
85 netif_tx_front_ring_t tx;
86 netif_rx_front_ring_t rx;
88 spinlock_t tx_lock;
89 spinlock_t rx_lock;
91 unsigned int handle;
92 unsigned int evtchn, irq;
94 /* What is the status of our connection to the remote backend? */
95 #define BEST_CLOSED 0
96 #define BEST_DISCONNECTED 1
97 #define BEST_CONNECTED 2
98 unsigned int backend_state;
100 /* Is this interface open or closed (down or up)? */
101 #define UST_CLOSED 0
102 #define UST_OPEN 1
103 unsigned int user_state;
105 /* Receive-ring batched refills. */
106 #define RX_MIN_TARGET 8
107 #define RX_DFL_MIN_TARGET 64
108 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
109 int rx_min_target, rx_max_target, rx_target;
110 struct sk_buff_head rx_batch;
112 struct timer_list rx_refill_timer;
114 /*
115 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
116 * array is an index into a chain of free entries.
117 */
118 struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
119 struct sk_buff *rx_skbs[NET_RX_RING_SIZE+1];
121 #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
122 grant_ref_t gref_tx_head;
123 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
124 grant_ref_t gref_rx_head;
125 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];
127 struct xenbus_device *xbdev;
128 int tx_ring_ref;
129 int rx_ring_ref;
130 u8 mac[ETH_ALEN];
132 unsigned long rx_pfn_array[NET_RX_RING_SIZE];
133 multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
134 mmu_update_t rx_mmu[NET_RX_RING_SIZE];
135 };
137 /*
138 * Access macros for acquiring and freeing slots in {tx,rx}_skbs[].
139 */
141 static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
142 {
143 list[id] = list[0];
144 list[0] = (void *)(unsigned long)id;
145 }
147 static inline unsigned short get_id_from_freelist(struct sk_buff **list)
148 {
149 unsigned int id = (unsigned int)(unsigned long)list[0];
150 list[0] = list[id];
151 return id;
152 }
154 #ifdef DEBUG
155 static char *be_state_name[] = {
156 [BEST_CLOSED] = "closed",
157 [BEST_DISCONNECTED] = "disconnected",
158 [BEST_CONNECTED] = "connected",
159 };
160 #endif
162 #define DPRINTK(fmt, args...) pr_debug("netfront (%s:%d) " fmt, \
163 __FUNCTION__, __LINE__, ##args)
164 #define IPRINTK(fmt, args...) \
165 printk(KERN_INFO "netfront: " fmt, ##args)
166 #define WPRINTK(fmt, args...) \
167 printk(KERN_WARNING "netfront: " fmt, ##args)
170 static int talk_to_backend(struct xenbus_device *, struct netfront_info *);
171 static int setup_device(struct xenbus_device *, struct netfront_info *);
172 static int create_netdev(int, struct xenbus_device *, struct net_device **);
174 static void netfront_closing(struct xenbus_device *);
176 static void end_access(int, void *);
177 static void netif_disconnect_backend(struct netfront_info *);
178 static void close_netdev(struct netfront_info *);
179 static void netif_free(struct netfront_info *);
181 static void show_device(struct netfront_info *);
183 static void network_connect(struct net_device *);
184 static void network_tx_buf_gc(struct net_device *);
185 static void network_alloc_rx_buffers(struct net_device *);
186 static int send_fake_arp(struct net_device *);
188 static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);
190 #ifdef CONFIG_PROC_FS
191 static int xennet_proc_init(void);
192 static int xennet_proc_addif(struct net_device *dev);
193 static void xennet_proc_delif(struct net_device *dev);
194 #else
195 #define xennet_proc_init() (0)
196 #define xennet_proc_addif(d) (0)
197 #define xennet_proc_delif(d) ((void)0)
198 #endif
201 /**
202 * Entry point to this code when a new device is created. Allocate the basic
203 * structures and the ring buffers for communication with the backend, and
204 * inform the backend of the appropriate details for those. Switch to
205 * Connected state.
206 */
207 static int netfront_probe(struct xenbus_device *dev,
208 const struct xenbus_device_id *id)
209 {
210 int err;
211 struct net_device *netdev;
212 struct netfront_info *info;
213 unsigned int handle;
215 err = xenbus_scanf(XBT_NULL, dev->nodename, "handle", "%u", &handle);
216 if (err != 1) {
217 xenbus_dev_fatal(dev, err, "reading handle");
218 return err;
219 }
221 err = create_netdev(handle, dev, &netdev);
222 if (err) {
223 xenbus_dev_fatal(dev, err, "creating netdev");
224 return err;
225 }
227 info = netdev_priv(netdev);
228 dev->data = info;
230 err = talk_to_backend(dev, info);
231 if (err) {
232 kfree(info);
233 dev->data = NULL;
234 return err;
235 }
237 return 0;
238 }
241 /**
242 * We are reconnecting to the backend, due to a suspend/resume, or a backend
243 * driver restart. We tear down our netif structure and recreate it, but
244 * leave the device-layer structures intact so that this is transparent to the
245 * rest of the kernel.
246 */
247 static int netfront_resume(struct xenbus_device *dev)
248 {
249 struct netfront_info *info = dev->data;
251 DPRINTK("%s\n", dev->nodename);
253 netif_disconnect_backend(info);
254 return talk_to_backend(dev, info);
255 }
258 /* Common code used when first setting up, and when resuming. */
259 static int talk_to_backend(struct xenbus_device *dev,
260 struct netfront_info *info)
261 {
262 const char *message;
263 xenbus_transaction_t xbt;
264 int err;
266 err = xen_net_read_mac(dev, info->mac);
267 if (err) {
268 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
269 goto out;
270 }
272 /* Create shared ring, alloc event channel. */
273 err = setup_device(dev, info);
274 if (err)
275 goto out;
277 again:
278 err = xenbus_transaction_start(&xbt);
279 if (err) {
280 xenbus_dev_fatal(dev, err, "starting transaction");
281 goto destroy_ring;
282 }
284 err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
285 info->tx_ring_ref);
286 if (err) {
287 message = "writing tx ring-ref";
288 goto abort_transaction;
289 }
290 err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
291 info->rx_ring_ref);
292 if (err) {
293 message = "writing rx ring-ref";
294 goto abort_transaction;
295 }
296 err = xenbus_printf(xbt, dev->nodename,
297 "event-channel", "%u", info->evtchn);
298 if (err) {
299 message = "writing event-channel";
300 goto abort_transaction;
301 }
303 err = xenbus_transaction_end(xbt, 0);
304 if (err) {
305 if (err == -EAGAIN)
306 goto again;
307 xenbus_dev_fatal(dev, err, "completing transaction");
308 goto destroy_ring;
309 }
311 xenbus_switch_state(dev, XenbusStateConnected);
313 return 0;
315 abort_transaction:
316 xenbus_transaction_end(xbt, 1);
317 xenbus_dev_fatal(dev, err, "%s", message);
318 destroy_ring:
319 netif_free(info);
320 out:
321 return err;
322 }
325 static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
326 {
327 netif_tx_sring_t *txs;
328 netif_rx_sring_t *rxs;
329 int err;
330 struct net_device *netdev = info->netdev;
332 info->tx_ring_ref = GRANT_INVALID_REF;
333 info->rx_ring_ref = GRANT_INVALID_REF;
334 info->rx.sring = NULL;
335 info->tx.sring = NULL;
336 info->irq = 0;
338 txs = (netif_tx_sring_t *)__get_free_page(GFP_KERNEL);
339 if (!txs) {
340 err = -ENOMEM;
341 xenbus_dev_fatal(dev, err, "allocating tx ring page");
342 goto fail;
343 }
344 rxs = (netif_rx_sring_t *)__get_free_page(GFP_KERNEL);
345 if (!rxs) {
346 err = -ENOMEM;
347 xenbus_dev_fatal(dev, err, "allocating rx ring page");
348 goto fail;
349 }
350 memset(txs, 0, PAGE_SIZE);
351 memset(rxs, 0, PAGE_SIZE);
352 info->backend_state = BEST_DISCONNECTED;
354 SHARED_RING_INIT(txs);
355 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
357 SHARED_RING_INIT(rxs);
358 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
360 err = xenbus_grant_ring(dev, virt_to_mfn(txs));
361 if (err < 0)
362 goto fail;
363 info->tx_ring_ref = err;
365 err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
366 if (err < 0)
367 goto fail;
368 info->rx_ring_ref = err;
370 err = xenbus_alloc_evtchn(dev, &info->evtchn);
371 if (err)
372 goto fail;
374 memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
375 network_connect(netdev);
376 info->irq = bind_evtchn_to_irqhandler(
377 info->evtchn, netif_int, SA_SAMPLE_RANDOM, netdev->name,
378 netdev);
379 (void)send_fake_arp(netdev);
380 show_device(info);
382 return 0;
384 fail:
385 netif_free(info);
386 return err;
387 }
390 /**
391 * Callback received when the backend's state changes.
392 */
393 static void backend_changed(struct xenbus_device *dev,
394 XenbusState backend_state)
395 {
396 DPRINTK("\n");
398 switch (backend_state) {
399 case XenbusStateInitialising:
400 case XenbusStateInitWait:
401 case XenbusStateInitialised:
402 case XenbusStateConnected:
403 case XenbusStateUnknown:
404 case XenbusStateClosed:
405 break;
407 case XenbusStateClosing:
408 netfront_closing(dev);
409 break;
410 }
411 }
414 /** Send a packet on a net device to encourage switches to learn the
415 * MAC. We send a fake ARP reply.
416 *
417 * @param dev device
418 * @return 0 on success, error code otherwise
419 */
420 static int send_fake_arp(struct net_device *dev)
421 {
422 struct sk_buff *skb;
423 u32 src_ip, dst_ip;
425 dst_ip = INADDR_BROADCAST;
426 src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
428 /* No IP? Then nothing to do. */
429 if (src_ip == 0)
430 return 0;
432 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
433 dst_ip, dev, src_ip,
434 /*dst_hw*/ NULL, /*src_hw*/ NULL,
435 /*target_hw*/ dev->dev_addr);
436 if (skb == NULL)
437 return -ENOMEM;
439 return dev_queue_xmit(skb);
440 }
443 static int network_open(struct net_device *dev)
444 {
445 struct netfront_info *np = netdev_priv(dev);
447 memset(&np->stats, 0, sizeof(np->stats));
449 np->user_state = UST_OPEN;
451 network_alloc_rx_buffers(dev);
452 np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
454 netif_start_queue(dev);
456 return 0;
457 }
459 static void network_tx_buf_gc(struct net_device *dev)
460 {
461 RING_IDX i, prod;
462 unsigned short id;
463 struct netfront_info *np = netdev_priv(dev);
464 struct sk_buff *skb;
466 if (np->backend_state != BEST_CONNECTED)
467 return;
469 do {
470 prod = np->tx.sring->rsp_prod;
471 rmb(); /* Ensure we see responses up to 'prod'. */
473 for (i = np->tx.rsp_cons; i != prod; i++) {
474 id = RING_GET_RESPONSE(&np->tx, i)->id;
475 skb = np->tx_skbs[id];
476 if (unlikely(gnttab_query_foreign_access(
477 np->grant_tx_ref[id]) != 0)) {
478 printk(KERN_ALERT "network_tx_buf_gc: warning "
479 "-- grant still in use by backend "
480 "domain.\n");
481 goto out;
482 }
483 gnttab_end_foreign_access_ref(
484 np->grant_tx_ref[id], GNTMAP_readonly);
485 gnttab_release_grant_reference(
486 &np->gref_tx_head, np->grant_tx_ref[id]);
487 np->grant_tx_ref[id] = GRANT_INVALID_REF;
488 add_id_to_freelist(np->tx_skbs, id);
489 dev_kfree_skb_irq(skb);
490 }
492 np->tx.rsp_cons = prod;
494 /*
495 * Set a new event, then check for race with update of tx_cons.
496 * Note that it is essential to schedule a callback, no matter
497 * how few buffers are pending. Even if there is space in the
498 * transmit ring, higher layers may be blocked because too much
499 * data is outstanding: in such cases notification from Xen is
500 * likely to be the only kick that we'll get.
501 */
502 np->tx.sring->rsp_event =
503 prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
504 mb();
505 } while (prod != np->tx.sring->rsp_prod);
507 out:
508 if (unlikely(netif_queue_stopped(dev)) &&
509 ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE) &&
510 !gnttab_empty_grant_references(&np->gref_tx_head)) {
511 if (np->user_state == UST_OPEN)
512 netif_wake_queue(dev);
513 }
514 }
517 static void rx_refill_timeout(unsigned long data)
518 {
519 struct net_device *dev = (struct net_device *)data;
520 netif_rx_schedule(dev);
521 }
524 static void network_alloc_rx_buffers(struct net_device *dev)
525 {
526 unsigned short id;
527 struct netfront_info *np = netdev_priv(dev);
528 struct sk_buff *skb;
529 int i, batch_target;
530 RING_IDX req_prod = np->rx.req_prod_pvt;
531 struct xen_memory_reservation reservation;
532 grant_ref_t ref;
534 if (unlikely(np->backend_state != BEST_CONNECTED))
535 return;
537 /*
538 * Allocate skbuffs greedily, even though we batch updates to the
539 * receive ring. This creates a less bursty demand on the memory
540 * allocator, so should reduce the chance of failed allocation requests
541 * both for ourself and for other kernel subsystems.
542 */
543 batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
544 for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
545 /*
546 * Subtract dev_alloc_skb headroom (16 bytes) and shared info
547 * tailroom then round down to SKB_DATA_ALIGN boundary.
548 */
549 skb = __dev_alloc_skb(
550 ((PAGE_SIZE - sizeof(struct skb_shared_info)) &
551 (-SKB_DATA_ALIGN(1))) - 16,
552 GFP_ATOMIC|__GFP_NOWARN);
553 if (skb == NULL) {
554 /* Any skbuffs queued for refill? Force them out. */
555 if (i != 0)
556 goto refill;
557 /* Could not allocate any skbuffs. Try again later. */
558 mod_timer(&np->rx_refill_timer,
559 jiffies + (HZ/10));
560 return;
561 }
562 __skb_queue_tail(&np->rx_batch, skb);
563 }
565 /* Is the batch large enough to be worthwhile? */
566 if (i < (np->rx_target/2))
567 return;
569 /* Adjust our fill target if we risked running out of buffers. */
570 if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
571 ((np->rx_target *= 2) > np->rx_max_target))
572 np->rx_target = np->rx_max_target;
574 refill:
575 for (i = 0; ; i++) {
576 if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
577 break;
579 skb->dev = dev;
581 id = get_id_from_freelist(np->rx_skbs);
583 np->rx_skbs[id] = skb;
585 RING_GET_REQUEST(&np->rx, req_prod + i)->id = id;
586 ref = gnttab_claim_grant_reference(&np->gref_rx_head);
587 BUG_ON((signed short)ref < 0);
588 np->grant_rx_ref[id] = ref;
589 gnttab_grant_foreign_transfer_ref(ref,
590 np->xbdev->otherend_id,
591 __pa(skb->head) >> PAGE_SHIFT);
592 RING_GET_REQUEST(&np->rx, req_prod + i)->gref = ref;
593 np->rx_pfn_array[i] = virt_to_mfn(skb->head);
595 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
596 /* Remove this page before passing back to Xen. */
597 set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
598 INVALID_P2M_ENTRY);
599 MULTI_update_va_mapping(np->rx_mcl+i,
600 (unsigned long)skb->head,
601 __pte(0), 0);
602 }
603 }
605 /* Tell the balloon driver what is going on. */
606 balloon_update_driver_allowance(i);
608 set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array);
609 reservation.nr_extents = i;
610 reservation.extent_order = 0;
611 reservation.address_bits = 0;
612 reservation.domid = DOMID_SELF;
614 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
615 /* After all PTEs have been zapped, flush the TLB. */
616 np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
617 UVMF_TLB_FLUSH|UVMF_ALL;
619 /* Give away a batch of pages. */
620 np->rx_mcl[i].op = __HYPERVISOR_memory_op;
621 np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
622 np->rx_mcl[i].args[1] = (unsigned long)&reservation;
624 /* Zap PTEs and give away pages in one big multicall. */
625 (void)HYPERVISOR_multicall(np->rx_mcl, i+1);
627 /* Check return status of HYPERVISOR_memory_op(). */
628 if (unlikely(np->rx_mcl[i].result != i))
629 panic("Unable to reduce memory reservation\n");
630 } else
631 if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
632 &reservation) != i)
633 panic("Unable to reduce memory reservation\n");
635 /* Above is a suitable barrier to ensure backend will see requests. */
636 np->rx.req_prod_pvt = req_prod + i;
637 RING_PUSH_REQUESTS(&np->rx);
638 }
641 static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
642 {
643 unsigned short id;
644 struct netfront_info *np = netdev_priv(dev);
645 netif_tx_request_t *tx;
646 RING_IDX i;
647 grant_ref_t ref;
648 unsigned long mfn;
649 int notify;
651 if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
652 PAGE_SIZE)) {
653 struct sk_buff *nskb;
654 nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC|__GFP_NOWARN);
655 if (unlikely(nskb == NULL))
656 goto drop;
657 skb_put(nskb, skb->len);
658 memcpy(nskb->data, skb->data, skb->len);
659 /* Copy only the header fields we use in this driver. */
660 nskb->dev = skb->dev;
661 nskb->ip_summed = skb->ip_summed;
662 nskb->proto_data_valid = skb->proto_data_valid;
663 dev_kfree_skb(skb);
664 skb = nskb;
665 }
667 spin_lock_irq(&np->tx_lock);
669 if (np->backend_state != BEST_CONNECTED) {
670 spin_unlock_irq(&np->tx_lock);
671 goto drop;
672 }
674 i = np->tx.req_prod_pvt;
676 id = get_id_from_freelist(np->tx_skbs);
677 np->tx_skbs[id] = skb;
679 tx = RING_GET_REQUEST(&np->tx, i);
681 tx->id = id;
682 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
683 BUG_ON((signed short)ref < 0);
684 mfn = virt_to_mfn(skb->data);
685 gnttab_grant_foreign_access_ref(
686 ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
687 tx->gref = np->grant_tx_ref[id] = ref;
688 tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
689 tx->size = skb->len;
691 tx->flags = 0;
692 if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
693 tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
694 if (skb->proto_data_valid) /* remote but checksummed? */
695 tx->flags |= NETTXF_data_validated;
697 np->tx.req_prod_pvt = i + 1;
698 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
699 if (notify)
700 notify_remote_via_irq(np->irq);
702 network_tx_buf_gc(dev);
704 if (RING_FULL(&np->tx) ||
705 gnttab_empty_grant_references(&np->gref_tx_head)) {
706 netif_stop_queue(dev);
707 }
709 spin_unlock_irq(&np->tx_lock);
711 np->stats.tx_bytes += skb->len;
712 np->stats.tx_packets++;
714 return 0;
716 drop:
717 np->stats.tx_dropped++;
718 dev_kfree_skb(skb);
719 return 0;
720 }
722 static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
723 {
724 struct net_device *dev = dev_id;
725 struct netfront_info *np = netdev_priv(dev);
726 unsigned long flags;
728 spin_lock_irqsave(&np->tx_lock, flags);
729 network_tx_buf_gc(dev);
730 spin_unlock_irqrestore(&np->tx_lock, flags);
732 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx) &&
733 (np->user_state == UST_OPEN))
734 netif_rx_schedule(dev);
736 return IRQ_HANDLED;
737 }
740 static int netif_poll(struct net_device *dev, int *pbudget)
741 {
742 struct netfront_info *np = netdev_priv(dev);
743 struct sk_buff *skb, *nskb;
744 netif_rx_response_t *rx;
745 RING_IDX i, rp;
746 mmu_update_t *mmu = np->rx_mmu;
747 multicall_entry_t *mcl = np->rx_mcl;
748 int work_done, budget, more_to_do = 1;
749 struct sk_buff_head rxq;
750 unsigned long flags;
751 unsigned long mfn;
752 grant_ref_t ref;
754 spin_lock(&np->rx_lock);
756 if (np->backend_state != BEST_CONNECTED) {
757 spin_unlock(&np->rx_lock);
758 return 0;
759 }
761 skb_queue_head_init(&rxq);
763 if ((budget = *pbudget) > dev->quota)
764 budget = dev->quota;
765 rp = np->rx.sring->rsp_prod;
766 rmb(); /* Ensure we see queued responses up to 'rp'. */
768 for (i = np->rx.rsp_cons, work_done = 0;
769 (i != rp) && (work_done < budget);
770 i++, work_done++) {
771 rx = RING_GET_RESPONSE(&np->rx, i);
773 /*
774 * This definitely indicates a bug, either in this driver or
775 * in the backend driver. In future this should flag the bad
776 * situation to the system controller to reboot the backend.
777 */
778 if ((ref = np->grant_rx_ref[rx->id]) == GRANT_INVALID_REF) {
779 WPRINTK("Bad rx response id %d.\n", rx->id);
780 work_done--;
781 continue;
782 }
784 /* Memory pressure, insufficient buffer headroom, ... */
785 if ((mfn = gnttab_end_foreign_transfer_ref(ref)) == 0) {
786 if (net_ratelimit())
787 WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
788 rx->id, rx->status);
789 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id =
790 rx->id;
791 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref =
792 ref;
793 np->rx.req_prod_pvt++;
794 RING_PUSH_REQUESTS(&np->rx);
795 work_done--;
796 continue;
797 }
799 gnttab_release_grant_reference(&np->gref_rx_head, ref);
800 np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
802 skb = np->rx_skbs[rx->id];
803 add_id_to_freelist(np->rx_skbs, rx->id);
805 /* NB. We handle skb overflow later. */
806 skb->data = skb->head + rx->offset;
807 skb->len = rx->status;
808 skb->tail = skb->data + skb->len;
810 /*
811 * Old backends do not assert data_validated but we
812 * can infer it from csum_blank so test both flags.
813 */
814 if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank)) {
815 skb->ip_summed = CHECKSUM_UNNECESSARY;
816 skb->proto_data_valid = 1;
817 } else {
818 skb->ip_summed = CHECKSUM_NONE;
819 skb->proto_data_valid = 0;
820 }
821 skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank);
823 np->stats.rx_packets++;
824 np->stats.rx_bytes += rx->status;
826 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
827 /* Remap the page. */
828 MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
829 pfn_pte_ma(mfn, PAGE_KERNEL),
830 0);
831 mcl++;
832 mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
833 | MMU_MACHPHYS_UPDATE;
834 mmu->val = __pa(skb->head) >> PAGE_SHIFT;
835 mmu++;
837 set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
838 mfn);
839 }
841 __skb_queue_tail(&rxq, skb);
842 }
844 /* Some pages are no longer absent... */
845 balloon_update_driver_allowance(-work_done);
847 /* Do all the remapping work, and M2P updates, in one big hypercall. */
848 if (likely((mcl - np->rx_mcl) != 0)) {
849 mcl->op = __HYPERVISOR_mmu_update;
850 mcl->args[0] = (unsigned long)np->rx_mmu;
851 mcl->args[1] = mmu - np->rx_mmu;
852 mcl->args[2] = 0;
853 mcl->args[3] = DOMID_SELF;
854 mcl++;
855 (void)HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
856 }
858 while ((skb = __skb_dequeue(&rxq)) != NULL) {
859 if (skb->len > (dev->mtu + ETH_HLEN + 4)) {
860 if (net_ratelimit())
861 printk(KERN_INFO "Received packet too big for "
862 "MTU (%d > %d)\n",
863 skb->len - ETH_HLEN - 4, dev->mtu);
864 skb->len = 0;
865 skb->tail = skb->data;
866 init_skb_shinfo(skb);
867 dev_kfree_skb(skb);
868 continue;
869 }
871 /*
872 * Enough room in skbuff for the data we were passed? Also,
873 * Linux expects at least 16 bytes headroom in each rx buffer.
874 */
875 if (unlikely(skb->tail > skb->end) ||
876 unlikely((skb->data - skb->head) < 16)) {
877 if (net_ratelimit()) {
878 if (skb->tail > skb->end)
879 printk(KERN_INFO "Received packet "
880 "is %zd bytes beyond tail.\n",
881 skb->tail - skb->end);
882 else
883 printk(KERN_INFO "Received packet "
884 "is %zd bytes before head.\n",
885 16 - (skb->data - skb->head));
886 }
888 nskb = __dev_alloc_skb(skb->len + 2,
889 GFP_ATOMIC|__GFP_NOWARN);
890 if (nskb != NULL) {
891 skb_reserve(nskb, 2);
892 skb_put(nskb, skb->len);
893 memcpy(nskb->data, skb->data, skb->len);
894 /* Copy any other fields we already set up. */
895 nskb->dev = skb->dev;
896 nskb->ip_summed = skb->ip_summed;
897 nskb->proto_data_valid = skb->proto_data_valid;
898 nskb->proto_csum_blank = skb->proto_csum_blank;
899 }
901 /* Reinitialise and then destroy the old skbuff. */
902 skb->len = 0;
903 skb->tail = skb->data;
904 init_skb_shinfo(skb);
905 dev_kfree_skb(skb);
907 /* Switch old for new, if we copied the buffer. */
908 if ((skb = nskb) == NULL)
909 continue;
910 }
912 /* Set the shinfo area, which is hidden behind the data. */
913 init_skb_shinfo(skb);
914 /* Ethernet work: Delayed to here as it peeks the header. */
915 skb->protocol = eth_type_trans(skb, dev);
917 /* Pass it up. */
918 netif_receive_skb(skb);
919 dev->last_rx = jiffies;
920 }
922 np->rx.rsp_cons = i;
924 /* If we get a callback with very few responses, reduce fill target. */
925 /* NB. Note exponential increase, linear decrease. */
926 if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
927 ((3*np->rx_target) / 4)) &&
928 (--np->rx_target < np->rx_min_target))
929 np->rx_target = np->rx_min_target;
931 network_alloc_rx_buffers(dev);
933 *pbudget -= work_done;
934 dev->quota -= work_done;
936 if (work_done < budget) {
937 local_irq_save(flags);
939 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
940 if (!more_to_do)
941 __netif_rx_complete(dev);
943 local_irq_restore(flags);
944 }
946 spin_unlock(&np->rx_lock);
948 return more_to_do;
949 }
952 static int network_close(struct net_device *dev)
953 {
954 struct netfront_info *np = netdev_priv(dev);
955 np->user_state = UST_CLOSED;
956 netif_stop_queue(np->netdev);
957 return 0;
958 }
961 static struct net_device_stats *network_get_stats(struct net_device *dev)
962 {
963 struct netfront_info *np = netdev_priv(dev);
964 return &np->stats;
965 }
967 static void network_connect(struct net_device *dev)
968 {
969 struct netfront_info *np;
970 int i, requeue_idx;
971 netif_tx_request_t *tx;
972 struct sk_buff *skb;
974 np = netdev_priv(dev);
975 spin_lock_irq(&np->tx_lock);
976 spin_lock(&np->rx_lock);
978 /* Recovery procedure: */
980 /*
981 * Step 1: Rebuild the RX and TX ring contents.
982 * NB. We could just free the queued TX packets now but we hope
983 * that sending them out might do some good. We have to rebuild
984 * the RX ring because some of our pages are currently flipped out
985 * so we can't just free the RX skbs.
986 * NB2. Freelist index entries are always going to be less than
987 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
988 * greater than PAGE_OFFSET: we use this property to distinguish
989 * them.
990 */
992 /*
993 * Rebuild the TX buffer freelist and the TX ring itself.
994 * NB. This reorders packets. We could keep more private state
995 * to avoid this but maybe it doesn't matter so much given the
996 * interface has been down.
997 */
998 for (requeue_idx = 0, i = 1; i <= NET_TX_RING_SIZE; i++) {
999 if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET)
1000 continue;
1002 skb = np->tx_skbs[i];
1004 tx = RING_GET_REQUEST(&np->tx, requeue_idx);
1005 requeue_idx++;
1007 tx->id = i;
1008 gnttab_grant_foreign_access_ref(
1009 np->grant_tx_ref[i], np->xbdev->otherend_id,
1010 virt_to_mfn(np->tx_skbs[i]->data),
1011 GNTMAP_readonly);
1012 tx->gref = np->grant_tx_ref[i];
1013 tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
1014 tx->size = skb->len;
1015 tx->flags = 0;
1016 if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
1017 tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
1018 if (skb->proto_data_valid) /* remote but checksummed? */
1019 tx->flags |= NETTXF_data_validated;
1021 np->stats.tx_bytes += skb->len;
1022 np->stats.tx_packets++;
1023 }
1025 np->tx.req_prod_pvt = requeue_idx;
1026 RING_PUSH_REQUESTS(&np->tx);
1028 /* Rebuild the RX buffer freelist and the RX ring itself. */
1029 for (requeue_idx = 0, i = 1; i <= NET_RX_RING_SIZE; i++) {
1030 if ((unsigned long)np->rx_skbs[i] < PAGE_OFFSET)
1031 continue;
1032 gnttab_grant_foreign_transfer_ref(
1033 np->grant_rx_ref[i], np->xbdev->otherend_id,
1034 __pa(np->rx_skbs[i]->data) >> PAGE_SHIFT);
1035 RING_GET_REQUEST(&np->rx, requeue_idx)->gref =
1036 np->grant_rx_ref[i];
1037 RING_GET_REQUEST(&np->rx, requeue_idx)->id = i;
1038 requeue_idx++;
1039 }
1041 np->rx.req_prod_pvt = requeue_idx;
1042 RING_PUSH_REQUESTS(&np->rx);
1044 /*
1045 * Step 2: All public and private state should now be sane. Get
1046 * ready to start sending and receiving packets and give the driver
1047 * domain a kick because we've probably just requeued some
1048 * packets.
1049 */
1050 np->backend_state = BEST_CONNECTED;
1051 notify_remote_via_irq(np->irq);
1052 network_tx_buf_gc(dev);
1054 if (np->user_state == UST_OPEN)
1055 netif_start_queue(dev);
1057 spin_unlock(&np->rx_lock);
1058 spin_unlock_irq(&np->tx_lock);
1059 }
1061 static void show_device(struct netfront_info *np)
1062 {
1063 #ifdef DEBUG
1064 if (np) {
1065 IPRINTK("<vif handle=%u %s(%s) evtchn=%u tx=%p rx=%p>\n",
1066 np->handle,
1067 be_state_name[np->backend_state],
1068 np->user_state ? "open" : "closed",
1069 np->evtchn,
1070 np->tx,
1071 np->rx);
1072 } else
1073 IPRINTK("<vif NULL>\n");
1074 #endif
1075 }
1077 static void netif_uninit(struct net_device *dev)
1078 {
1079 struct netfront_info *np = netdev_priv(dev);
1080 gnttab_free_grant_references(np->gref_tx_head);
1081 gnttab_free_grant_references(np->gref_rx_head);
1082 }
1084 static struct ethtool_ops network_ethtool_ops =
1085 {
1086 .get_tx_csum = ethtool_op_get_tx_csum,
1087 .set_tx_csum = ethtool_op_set_tx_csum,
1088 };
1090 /*
1091 * Nothing to do here. Virtual interface is point-to-point and the
1092 * physical interface is probably promiscuous anyway.
1093 */
1094 static void network_set_multicast_list(struct net_device *dev)
1095 {
1096 }
1098 /** Create a network device.
1099 * @param handle device handle
1100 * @param val return parameter for created device
1101 * @return 0 on success, error code otherwise
1102 */
1103 static int create_netdev(int handle, struct xenbus_device *dev,
1104 struct net_device **val)
1105 {
1106 int i, err = 0;
1107 struct net_device *netdev = NULL;
1108 struct netfront_info *np = NULL;
1110 if ((netdev = alloc_etherdev(sizeof(struct netfront_info))) == NULL) {
1111 printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
1112 __FUNCTION__);
1113 err = -ENOMEM;
1114 goto exit;
1115 }
1117 np = netdev_priv(netdev);
1118 np->backend_state = BEST_CLOSED;
1119 np->user_state = UST_CLOSED;
1120 np->handle = handle;
1121 np->xbdev = dev;
1123 spin_lock_init(&np->tx_lock);
1124 spin_lock_init(&np->rx_lock);
1126 skb_queue_head_init(&np->rx_batch);
1127 np->rx_target = RX_DFL_MIN_TARGET;
1128 np->rx_min_target = RX_DFL_MIN_TARGET;
1129 np->rx_max_target = RX_MAX_TARGET;
1131 init_timer(&np->rx_refill_timer);
1132 np->rx_refill_timer.data = (unsigned long)netdev;
1133 np->rx_refill_timer.function = rx_refill_timeout;
1135 /* Initialise {tx,rx}_skbs as a free chain containing every entry. */
1136 for (i = 0; i <= NET_TX_RING_SIZE; i++) {
1137 np->tx_skbs[i] = (void *)((unsigned long) i+1);
1138 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1139 }
1141 for (i = 0; i <= NET_RX_RING_SIZE; i++) {
1142 np->rx_skbs[i] = (void *)((unsigned long) i+1);
1143 np->grant_rx_ref[i] = GRANT_INVALID_REF;
1144 }
1146 /* A grant for every tx ring slot */
1147 if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1148 &np->gref_tx_head) < 0) {
1149 printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
1150 err = -ENOMEM;
1151 goto exit;
1152 }
1153 /* A grant for every rx ring slot */
1154 if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1155 &np->gref_rx_head) < 0) {
1156 printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
1157 gnttab_free_grant_references(np->gref_tx_head);
1158 err = -ENOMEM;
1159 goto exit;
1160 }
1162 netdev->open = network_open;
1163 netdev->hard_start_xmit = network_start_xmit;
1164 netdev->stop = network_close;
1165 netdev->get_stats = network_get_stats;
1166 netdev->poll = netif_poll;
1167 netdev->set_multicast_list = network_set_multicast_list;
1168 netdev->uninit = netif_uninit;
1169 netdev->weight = 64;
1170 netdev->features = NETIF_F_IP_CSUM;
1172 SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
1173 SET_MODULE_OWNER(netdev);
1174 SET_NETDEV_DEV(netdev, &dev->dev);
1176 if ((err = register_netdev(netdev)) != 0) {
1177 printk(KERN_WARNING "%s> register_netdev err=%d\n",
1178 __FUNCTION__, err);
1179 goto exit_free_grefs;
1180 }
1182 if ((err = xennet_proc_addif(netdev)) != 0) {
1183 unregister_netdev(netdev);
1184 goto exit_free_grefs;
1185 }
1187 np->netdev = netdev;
1189 exit:
1190 if (err != 0)
1191 kfree(netdev);
1192 else if (val != NULL)
1193 *val = netdev;
1194 return err;
1196 exit_free_grefs:
1197 gnttab_free_grant_references(np->gref_tx_head);
1198 gnttab_free_grant_references(np->gref_rx_head);
1199 goto exit;
1200 }
1202 /*
1203 * We use this notifier to send out a fake ARP reply to reset switches and
1204 * router ARP caches when an IP interface is brought up on a VIF.
1205 */
1206 static int
1207 inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
1208 {
1209 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
1210 struct net_device *dev = ifa->ifa_dev->dev;
1212 /* UP event and is it one of our devices? */
1213 if (event == NETDEV_UP && dev->open == network_open)
1214 (void)send_fake_arp(dev);
1216 return NOTIFY_DONE;
1217 }
1220 /* ** Close down ** */
1223 /**
1224 * Handle the change of state of the backend to Closing. We must delete our
1225 * device-layer structures now, to ensure that writes are flushed through to
1226 * the backend. Once this is done, we can switch to Closed in
1227 * acknowledgement.
1228 */
1229 static void netfront_closing(struct xenbus_device *dev)
1230 {
1231 struct netfront_info *info = dev->data;
1233 DPRINTK("netfront_closing: %s removed\n", dev->nodename);
1235 close_netdev(info);
1237 xenbus_switch_state(dev, XenbusStateClosed);
1238 }
1241 static int netfront_remove(struct xenbus_device *dev)
1242 {
1243 struct netfront_info *info = dev->data;
1245 DPRINTK("%s\n", dev->nodename);
1247 netif_disconnect_backend(info);
1248 free_netdev(info->netdev);
1250 return 0;
1251 }
1254 static void close_netdev(struct netfront_info *info)
1255 {
1256 #ifdef CONFIG_PROC_FS
1257 xennet_proc_delif(info->netdev);
1258 #endif
1260 del_timer_sync(&info->rx_refill_timer);
1262 unregister_netdev(info->netdev);
1263 }
1266 static void netif_disconnect_backend(struct netfront_info *info)
1267 {
1268 /* Stop old i/f to prevent errors whilst we rebuild the state. */
1269 spin_lock_irq(&info->tx_lock);
1270 spin_lock(&info->rx_lock);
1271 info->backend_state = BEST_DISCONNECTED;
1272 spin_unlock(&info->rx_lock);
1273 spin_unlock_irq(&info->tx_lock);
1275 if (info->irq)
1276 unbind_from_irqhandler(info->irq, info->netdev);
1277 info->evtchn = info->irq = 0;
1279 end_access(info->tx_ring_ref, info->tx.sring);
1280 end_access(info->rx_ring_ref, info->rx.sring);
1281 info->tx_ring_ref = GRANT_INVALID_REF;
1282 info->rx_ring_ref = GRANT_INVALID_REF;
1283 info->tx.sring = NULL;
1284 info->rx.sring = NULL;
1285 }
1288 static void netif_free(struct netfront_info *info)
1289 {
1290 close_netdev(info);
1291 netif_disconnect_backend(info);
1292 free_netdev(info->netdev);
1293 }
1296 static void end_access(int ref, void *page)
1297 {
1298 if (ref != GRANT_INVALID_REF)
1299 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1300 }
1303 /* ** Driver registration ** */
1306 static struct xenbus_device_id netfront_ids[] = {
1307 { "vif" },
1308 { "" }
1309 };
1312 static struct xenbus_driver netfront = {
1313 .name = "vif",
1314 .owner = THIS_MODULE,
1315 .ids = netfront_ids,
1316 .probe = netfront_probe,
1317 .remove = netfront_remove,
1318 .resume = netfront_resume,
1319 .otherend_changed = backend_changed,
1320 };
1323 static struct notifier_block notifier_inetdev = {
1324 .notifier_call = inetdev_notify,
1325 .next = NULL,
1326 .priority = 0
1327 };
1329 static int __init netif_init(void)
1330 {
1331 int err = 0;
1333 if (xen_start_info->flags & SIF_INITDOMAIN)
1334 return 0;
1336 if ((err = xennet_proc_init()) != 0)
1337 return err;
1339 IPRINTK("Initialising virtual ethernet driver.\n");
1341 (void)register_inetaddr_notifier(&notifier_inetdev);
1343 return xenbus_register_frontend(&netfront);
1344 }
1345 module_init(netif_init);
1348 static void netif_exit(void)
1349 {
1350 unregister_inetaddr_notifier(&notifier_inetdev);
1352 return xenbus_unregister_driver(&netfront);
1353 }
1354 module_exit(netif_exit);
1356 MODULE_LICENSE("Dual BSD/GPL");
1359 /* ** /proc **/
1362 #ifdef CONFIG_PROC_FS
1364 #define TARGET_MIN 0UL
1365 #define TARGET_MAX 1UL
1366 #define TARGET_CUR 2UL
1368 static int xennet_proc_read(
1369 char *page, char **start, off_t off, int count, int *eof, void *data)
1370 {
1371 struct net_device *dev =
1372 (struct net_device *)((unsigned long)data & ~3UL);
1373 struct netfront_info *np = netdev_priv(dev);
1374 int len = 0, which_target = (long)data & 3;
1376 switch (which_target) {
1377 case TARGET_MIN:
1378 len = sprintf(page, "%d\n", np->rx_min_target);
1379 break;
1380 case TARGET_MAX:
1381 len = sprintf(page, "%d\n", np->rx_max_target);
1382 break;
1383 case TARGET_CUR:
1384 len = sprintf(page, "%d\n", np->rx_target);
1385 break;
1386 }
1388 *eof = 1;
1389 return len;
1390 }
1392 static int xennet_proc_write(
1393 struct file *file, const char __user *buffer,
1394 unsigned long count, void *data)
1395 {
1396 struct net_device *dev =
1397 (struct net_device *)((unsigned long)data & ~3UL);
1398 struct netfront_info *np = netdev_priv(dev);
1399 int which_target = (long)data & 3;
1400 char string[64];
1401 long target;
1403 if (!capable(CAP_SYS_ADMIN))
1404 return -EPERM;
1406 if (count <= 1)
1407 return -EBADMSG; /* runt */
1408 if (count > sizeof(string))
1409 return -EFBIG; /* too long */
1411 if (copy_from_user(string, buffer, count))
1412 return -EFAULT;
1413 string[sizeof(string)-1] = '\0';
1415 target = simple_strtol(string, NULL, 10);
1416 if (target < RX_MIN_TARGET)
1417 target = RX_MIN_TARGET;
1418 if (target > RX_MAX_TARGET)
1419 target = RX_MAX_TARGET;
1421 spin_lock(&np->rx_lock);
1423 switch (which_target) {
1424 case TARGET_MIN:
1425 if (target > np->rx_max_target)
1426 np->rx_max_target = target;
1427 np->rx_min_target = target;
1428 if (target > np->rx_target)
1429 np->rx_target = target;
1430 break;
1431 case TARGET_MAX:
1432 if (target < np->rx_min_target)
1433 np->rx_min_target = target;
1434 np->rx_max_target = target;
1435 if (target < np->rx_target)
1436 np->rx_target = target;
1437 break;
1438 case TARGET_CUR:
1439 break;
1440 }
1442 network_alloc_rx_buffers(dev);
1444 spin_unlock(&np->rx_lock);
1446 return count;
1447 }
1449 static int xennet_proc_init(void)
1450 {
1451 if (proc_mkdir("xen/net", NULL) == NULL)
1452 return -ENOMEM;
1453 return 0;
1454 }
1456 static int xennet_proc_addif(struct net_device *dev)
1457 {
1458 struct proc_dir_entry *dir, *min, *max, *cur;
1459 char name[30];
1461 sprintf(name, "xen/net/%s", dev->name);
1463 dir = proc_mkdir(name, NULL);
1464 if (!dir)
1465 goto nomem;
1467 min = create_proc_entry("rxbuf_min", 0644, dir);
1468 max = create_proc_entry("rxbuf_max", 0644, dir);
1469 cur = create_proc_entry("rxbuf_cur", 0444, dir);
1470 if (!min || !max || !cur)
1471 goto nomem;
1473 min->read_proc = xennet_proc_read;
1474 min->write_proc = xennet_proc_write;
1475 min->data = (void *)((unsigned long)dev | TARGET_MIN);
1477 max->read_proc = xennet_proc_read;
1478 max->write_proc = xennet_proc_write;
1479 max->data = (void *)((unsigned long)dev | TARGET_MAX);
1481 cur->read_proc = xennet_proc_read;
1482 cur->write_proc = xennet_proc_write;
1483 cur->data = (void *)((unsigned long)dev | TARGET_CUR);
1485 return 0;
1487 nomem:
1488 xennet_proc_delif(dev);
1489 return -ENOMEM;
1490 }
1492 static void xennet_proc_delif(struct net_device *dev)
1493 {
1494 char name[30];
1496 sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
1497 remove_proc_entry(name, NULL);
1499 sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
1500 remove_proc_entry(name, NULL);
1502 sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
1503 remove_proc_entry(name, NULL);
1505 sprintf(name, "xen/net/%s", dev->name);
1506 remove_proc_entry(name, NULL);
1507 }
1509 #endif
1512 /*
1513 * Local variables:
1514 * c-file-style: "linux"
1515 * indent-tabs-mode: t
1516 * c-indent-level: 8
1517 * c-basic-offset: 8
1518 * tab-width: 8
1519 * End:
1520 */