ia64/xen-unstable

linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c @ 9601:bbd1057d38f0

Use PAGE_OFFSET instead of __PAGE_OFFSET in netfront.c.
__PAGE_OFFSET is not defined on all platforms: Linux/ia64 and
Linux/ppc do not define it.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author:   kaf24@firebug.cl.cam.ac.uk
date:     Thu Apr 06 09:32:55 2006 +0100
parents:  806d04252761
children: b32bc0c5648d
/******************************************************************************
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/proc_fs.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/arp.h>
#include <net/route.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <xen/evtchn.h>
#include <xen/xenbus.h>
#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <asm/page.h>
#include <xen/interface/grant_table.h>
#include <xen/gnttab.h>
#include <xen/net_driver_util.h>

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
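
/*
 * Illustrative note (not part of the original driver): __RING_SIZE()
 * yields the largest power-of-two number of request/response slots that
 * fits in one page alongside the shared-ring header.  As a rough sketch:
 *
 *	entries   = (PAGE_SIZE - sizeof(ring header)) / sizeof(union entry);
 *	ring_size = largest power of two <= entries;
 *
 * so both ring sizes here are derived purely from PAGE_SIZE and the
 * netif request/response layouts.
 */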

static inline void init_skb_shinfo(struct sk_buff *skb)
{
	atomic_set(&(skb_shinfo(skb)->dataref), 1);
	skb_shinfo(skb)->nr_frags = 0;
	skb_shinfo(skb)->frag_list = NULL;
}

struct netfront_info
{
	struct list_head list;
	struct net_device *netdev;

	struct net_device_stats stats;
	unsigned int tx_full;

	netif_tx_front_ring_t tx;
	netif_rx_front_ring_t rx;

	spinlock_t tx_lock;
	spinlock_t rx_lock;

	unsigned int handle;
	unsigned int evtchn, irq;

	/* What is the status of our connection to the remote backend? */
#define BEST_CLOSED       0
#define BEST_DISCONNECTED 1
#define BEST_CONNECTED    2
	unsigned int backend_state;

	/* Is this interface open or closed (down or up)? */
#define UST_CLOSED 0
#define UST_OPEN   1
	unsigned int user_state;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET NET_RX_RING_SIZE
	int rx_min_target, rx_max_target, rx_target;
	struct sk_buff_head rx_batch;

	struct timer_list rx_refill_timer;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
	 * array is an index into a chain of free entries.
	 */
	struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
	struct sk_buff *rx_skbs[NET_RX_RING_SIZE+1];

	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];

	struct xenbus_device *xbdev;
	int tx_ring_ref;
	int rx_ring_ref;
	u8 mac[ETH_ALEN];

	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
	multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
	mmu_update_t rx_mmu[NET_RX_RING_SIZE];
};

/*
 * Access macros for acquiring and freeing slots in {tx,rx}_skbs[].
 */

static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
{
	list[id] = list[0];
	list[0] = (void *)(unsigned long)id;
}

static inline unsigned short get_id_from_freelist(struct sk_buff **list)
{
	unsigned int id = (unsigned int)(unsigned long)list[0];
	list[0] = list[id];
	return id;
}
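
/*
 * Illustrative sketch (not in the original source): the free chain is
 * threaded through the array itself, with small integer indices stored
 * where skb pointers normally live.  Assuming a freshly initialised
 * list where list[0] == (void *)1:
 *
 *	id = get_id_from_freelist(list);   // id == 1; list[0] = list[1]
 *	list[id] = skb;                    // slot now holds a real pointer
 *	...
 *	add_id_to_freelist(list, id);      // list[1] = old head;
 *	                                   // list[0] = (void *)1 again
 *
 * Indices are always below PAGE_OFFSET while kernel pointers are at or
 * above it, which is how the two are told apart during reconnection
 * (see network_connect() below and the PAGE_OFFSET note in the changeset
 * description).
 */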

#ifdef DEBUG
static char *be_state_name[] = {
	[BEST_CLOSED]       = "closed",
	[BEST_DISCONNECTED] = "disconnected",
	[BEST_CONNECTED]    = "connected",
};
#endif

#define DPRINTK(fmt, args...) pr_debug("netfront (%s:%d) " fmt, \
				       __FUNCTION__, __LINE__, ##args)
#define IPRINTK(fmt, args...) \
	printk(KERN_INFO "netfront: " fmt, ##args)
#define WPRINTK(fmt, args...) \
	printk(KERN_WARNING "netfront: " fmt, ##args)


static int talk_to_backend(struct xenbus_device *, struct netfront_info *);
static int setup_device(struct xenbus_device *, struct netfront_info *);
static int create_netdev(int, struct xenbus_device *, struct net_device **);

static void netfront_closing(struct xenbus_device *);

static void end_access(int, void *);
static void netif_disconnect_backend(struct netfront_info *);
static void close_netdev(struct netfront_info *);
static void netif_free(struct netfront_info *);

static void show_device(struct netfront_info *);

static void network_connect(struct net_device *);
static void network_tx_buf_gc(struct net_device *);
static void network_alloc_rx_buffers(struct net_device *);
static int send_fake_arp(struct net_device *);

static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);

#ifdef CONFIG_PROC_FS
static int xennet_proc_init(void);
static int xennet_proc_addif(struct net_device *dev);
static void xennet_proc_delif(struct net_device *dev);
#else
#define xennet_proc_init()   (0)
#define xennet_proc_addif(d) (0)
#define xennet_proc_delif(d) ((void)0)
#endif


/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Connected state.
 */
static int netfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;
	unsigned int handle;

	err = xenbus_scanf(XBT_NULL, dev->nodename, "handle", "%u", &handle);
	if (err != 1) {
		xenbus_dev_fatal(dev, err, "reading handle");
		return err;
	}

	err = create_netdev(handle, dev, &netdev);
	if (err) {
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev->data = info;

	err = talk_to_backend(dev, info);
	if (err) {
		kfree(info);
		dev->data = NULL;
		return err;
	}

	return 0;
}


/**
 * We are reconnecting to the backend, due to a suspend/resume or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to
 * the rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->data;

	DPRINTK("%s\n", dev->nodename);

	netif_disconnect_backend(info);
	return talk_to_backend(dev, info);
}


/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	xenbus_transaction_t xbt;
	int err;

	err = xen_net_read_mac(dev, info->mac);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto out;
	}

	/* Create shared ring, alloc event channel. */
	err = setup_device(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
			    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
			    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "state", "%d", XenbusStateConnected);
	if (err) {
		message = "writing frontend XenbusStateConnected";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	netif_free(info);
 out:
	return err;
}
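
/*
 * Illustrative sketch (not part of the original file): after a successful
 * transaction, the frontend's xenstore directory contains roughly
 *
 *	<nodename>/tx-ring-ref   = "<grant ref>"
 *	<nodename>/rx-ring-ref   = "<grant ref>"
 *	<nodename>/event-channel = "<port>"
 *	<nodename>/state         = "4"	(XenbusStateConnected)
 *
 * where <nodename> comes from dev->nodename and the quoted values are
 * placeholders.  The backend watches these keys to complete its half of
 * the connection.
 */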

static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
{
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;
	int err;
	struct net_device *netdev = info->netdev;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	info->irq = 0;

	txs = (netif_tx_sring_t *)__get_free_page(GFP_KERNEL);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	rxs = (netif_rx_sring_t *)__get_free_page(GFP_KERNEL);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto fail;
	}
	memset(txs, 0, PAGE_SIZE);
	memset(rxs, 0, PAGE_SIZE);
	info->backend_state = BEST_DISCONNECTED;

	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0)
		goto fail;
	info->tx_ring_ref = err;

	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0)
		goto fail;
	info->rx_ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
	network_connect(netdev);
	info->irq = bind_evtchn_to_irqhandler(
		info->evtchn, netif_int, SA_SAMPLE_RANDOM, netdev->name,
		netdev);
	(void)send_fake_arp(netdev);
	show_device(info);

	return 0;

 fail:
	netif_free(info);
	return err;
}


/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
			    XenbusState backend_state)
{
	DPRINTK("\n");

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateConnected:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateClosing:
		netfront_closing(dev);
		break;
	}
}


/**
 * Send a packet on a net device to encourage switches to learn the
 * MAC.  We send a fake ARP reply.
 *
 * @param dev device
 * @return 0 on success, error code otherwise
 */
static int send_fake_arp(struct net_device *dev)
{
	struct sk_buff *skb;
	u32 src_ip, dst_ip;

	dst_ip = INADDR_BROADCAST;
	src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);

	/* No IP? Then nothing to do. */
	if (src_ip == 0)
		return 0;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 dst_ip, dev, src_ip,
			 /*dst_hw*/ NULL, /*src_hw*/ NULL,
			 /*target_hw*/ dev->dev_addr);
	if (skb == NULL)
		return -ENOMEM;

	return dev_queue_xmit(skb);
}


static int network_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	memset(&np->stats, 0, sizeof(np->stats));

	np->user_state = UST_OPEN;

	network_alloc_rx_buffers(dev);
	np->rx.sring->rsp_event = np->rx.rsp_cons + 1;

	netif_start_queue(dev);

	return 0;
}

static void network_tx_buf_gc(struct net_device *dev)
{
	RING_IDX i, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	if (np->backend_state != BEST_CONNECTED)
		return;

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'prod'. */

		for (i = np->tx.rsp_cons; i != prod; i++) {
			id = RING_GET_RESPONSE(&np->tx, i)->id;
			skb = np->tx_skbs[id];
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "network_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				goto out;
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of
		 * tx_cons.  Note that it is essential to schedule a
		 * callback, no matter how few buffers are pending.  Even if
		 * there is space in the transmit ring, higher layers may
		 * be blocked because too much data is outstanding: in such
		 * cases notification from Xen is likely to be the only kick
		 * that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();
	} while (prod != np->tx.sring->rsp_prod);

 out:
	if (np->tx_full &&
	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
		np->tx_full = 0;
		if (np->user_state == UST_OPEN)
			netif_wake_queue(dev);
	}
}
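
/*
 * Illustrative note (not in the original source): the rsp_event setting
 * above requests an interrupt roughly half-way through the outstanding
 * requests.  For example, with rsp_prod == 10 consumed and req_prod == 30
 * queued, rsp_event becomes 10 + ((30 - 10) >> 1) + 1 = 21, so the
 * backend notifies us after processing about half of the pending
 * transmit requests rather than after every single one.
 */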

static void rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	netif_rx_schedule(dev);
}


static void network_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	int i, batch_target;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	struct xen_memory_reservation reservation;
	grant_ref_t ref;

	if (unlikely(np->backend_state != BEST_CONNECTED))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring.  This creates a less bursty demand on the memory
	 * allocator, so it should reduce the chance of failed allocation
	 * requests both for ourself and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		/*
		 * Subtract dev_alloc_skb headroom (16 bytes) and shared info
		 * tailroom, then round down to a SKB_DATA_ALIGN boundary.
		 */
		skb = __dev_alloc_skb(
			((PAGE_SIZE - sizeof(struct skb_shared_info)) &
			 (-SKB_DATA_ALIGN(1))) - 16,
			GFP_ATOMIC|__GFP_NOWARN);
		if (skb == NULL) {
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			return;
		}
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2))
		return;

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
			break;

		skb->dev = dev;

		id = get_id_from_freelist(np->rx_skbs);

		np->rx_skbs[id] = skb;

		RING_GET_REQUEST(&np->rx, req_prod + i)->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;
		gnttab_grant_foreign_transfer_ref(
			ref, np->xbdev->otherend_id,
			__pa(skb->head) >> PAGE_SHIFT);
		RING_GET_REQUEST(&np->rx, req_prod + i)->gref = ref;
		np->rx_pfn_array[i] = virt_to_mfn(skb->head);

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remove this page before passing back to Xen. */
			set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
					    INVALID_P2M_ENTRY);
			MULTI_update_va_mapping(np->rx_mcl+i,
						(unsigned long)skb->head,
						__pte(0), 0);
		}
	}

	/* Tell the balloon driver what is going on. */
	balloon_update_driver_allowance(i);

	reservation.extent_start = np->rx_pfn_array;
	reservation.nr_extents   = i;
	reservation.extent_order = 0;
	reservation.address_bits = 0;
	reservation.domid        = DOMID_SELF;

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		/* After all PTEs have been zapped, flush the TLB. */
		np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
			UVMF_TLB_FLUSH|UVMF_ALL;

		/* Give away a batch of pages. */
		np->rx_mcl[i].op = __HYPERVISOR_memory_op;
		np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
		np->rx_mcl[i].args[1] = (unsigned long)&reservation;

		/* Zap PTEs and give away pages in one big multicall. */
		(void)HYPERVISOR_multicall(np->rx_mcl, i+1);

		/* Check return status of HYPERVISOR_memory_op(). */
		if (unlikely(np->rx_mcl[i].result != i))
			panic("Unable to reduce memory reservation\n");
	} else
		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					 &reservation) != i)
			panic("Unable to reduce memory reservation\n");

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
	RING_PUSH_REQUESTS(&np->rx);
}
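
/*
 * Illustrative sketch (not part of the original driver): for each batch
 * of n buffers, the refill path above effectively performs, on a
 * non-autotranslated guest:
 *
 *	multicalls 0..n-1:  zap the PTE and drop the P2M entry for
 *	                    each buffer page;
 *	multicall  n:       XENMEM_decrease_reservation, handing the
 *	                    pages to Xen.
 *
 * The backend later transfers received pages back through the
 * grant-transfer references queued on the rx ring, and netif_poll()
 * remaps them.
 */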

static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	netif_tx_request_t *tx;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;

	if (unlikely(np->tx_full)) {
		printk(KERN_ALERT "%s: full queue wasn't stopped!\n",
		       dev->name);
		netif_stop_queue(dev);
		goto drop;
	}

	if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
		     PAGE_SIZE)) {
		struct sk_buff *nskb;
		nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC|__GFP_NOWARN);
		if (unlikely(nskb == NULL))
			goto drop;
		skb_put(nskb, skb->len);
		memcpy(nskb->data, skb->data, skb->len);
		nskb->dev = skb->dev;
		dev_kfree_skb(skb);
		skb = nskb;
	}

	spin_lock_irq(&np->tx_lock);

	if (np->backend_state != BEST_CONNECTED) {
		spin_unlock_irq(&np->tx_lock);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(np->tx_skbs);
	np->tx_skbs[id] = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(skb->data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
	tx->size = skb->len;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
		tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
	if (skb->proto_data_valid) /* remote but checksummed? */
		tx->flags |= NETTXF_data_validated;

	np->tx.req_prod_pvt = i + 1;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->irq);

	network_tx_buf_gc(dev);

	if (RING_FULL(&np->tx)) {
		np->tx_full = 1;
		netif_stop_queue(dev);
	}

	spin_unlock_irq(&np->tx_lock);

	np->stats.tx_bytes += skb->len;
	np->stats.tx_packets++;

	return 0;

 drop:
	np->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}

static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
{
	struct net_device *dev = dev_id;
	struct netfront_info *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);
	network_tx_buf_gc(dev);
	spin_unlock_irqrestore(&np->tx_lock, flags);

	if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx) &&
	    (np->user_state == UST_OPEN))
		netif_rx_schedule(dev);

	return IRQ_HANDLED;
}


static int netif_poll(struct net_device *dev, int *pbudget)
{
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb, *nskb;
	netif_rx_response_t *rx;
	RING_IDX i, rp;
	mmu_update_t *mmu = np->rx_mmu;
	multicall_entry_t *mcl = np->rx_mcl;
	int work_done, budget, more_to_do = 1;
	struct sk_buff_head rxq;
	unsigned long flags;
	unsigned long mfn;
	grant_ref_t ref;

	spin_lock(&np->rx_lock);

	if (np->backend_state != BEST_CONNECTED) {
		spin_unlock(&np->rx_lock);
		return 0;
	}

	skb_queue_head_init(&rxq);

	if ((budget = *pbudget) > dev->quota)
		budget = dev->quota;
	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = np->rx.rsp_cons, work_done = 0;
	     (i != rp) && (work_done < budget);
	     i++, work_done++) {
		rx = RING_GET_RESPONSE(&np->rx, i);

		/*
		 * This definitely indicates a bug, either in this driver or
		 * in the backend driver.  In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if ((ref = np->grant_rx_ref[rx->id]) == GRANT_INVALID_REF) {
			WPRINTK("Bad rx response id %d.\n", rx->id);
			work_done--;
			continue;
		}

		/* Memory pressure, insufficient buffer headroom, ... */
		if ((mfn = gnttab_end_foreign_transfer_ref(ref)) == 0) {
			if (net_ratelimit())
				WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
					rx->id, rx->status);
			RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id =
				rx->id;
			RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref =
				ref;
			np->rx.req_prod_pvt++;
			RING_PUSH_REQUESTS(&np->rx);
			work_done--;
			continue;
		}

		gnttab_release_grant_reference(&np->gref_rx_head, ref);
		np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;

		skb = np->rx_skbs[rx->id];
		add_id_to_freelist(np->rx_skbs, rx->id);

		/* NB. We handle skb overflow later. */
		skb->data = skb->head + rx->offset;
		skb->len  = rx->status;
		skb->tail = skb->data + skb->len;

		/*
		 * Old backends do not assert data_validated but we
		 * can infer it from csum_blank so test both flags.
		 */
		if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->proto_data_valid = 1;
		} else {
			skb->ip_summed = CHECKSUM_NONE;
			skb->proto_data_valid = 0;
		}
		skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank);

		np->stats.rx_packets++;
		np->stats.rx_bytes += rx->status;

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remap the page. */
			MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
						pfn_pte_ma(mfn, PAGE_KERNEL),
						0);
			mcl++;
			mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
				| MMU_MACHPHYS_UPDATE;
			mmu->val = __pa(skb->head) >> PAGE_SHIFT;
			mmu++;

			set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
					    mfn);
		}

		__skb_queue_tail(&rxq, skb);
	}

	/* Some pages are no longer absent... */
	balloon_update_driver_allowance(-work_done);

	/* Do all the remapping work, and M2P updates, in one big hypercall. */
	if (likely((mcl - np->rx_mcl) != 0)) {
		mcl->op = __HYPERVISOR_mmu_update;
		mcl->args[0] = (unsigned long)np->rx_mmu;
		mcl->args[1] = mmu - np->rx_mmu;
		mcl->args[2] = 0;
		mcl->args[3] = DOMID_SELF;
		mcl++;
		(void)HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
	}

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		if (skb->len > (dev->mtu + ETH_HLEN + 4)) {
			if (net_ratelimit())
				printk(KERN_INFO "Received packet too big for "
				       "MTU (%d > %d)\n",
				       skb->len - ETH_HLEN - 4, dev->mtu);
			skb->len  = 0;
			skb->tail = skb->data;
			init_skb_shinfo(skb);
			dev_kfree_skb(skb);
			continue;
		}

		/*
		 * Enough room in skbuff for the data we were passed? Also,
		 * Linux expects at least 16 bytes headroom in each rx buffer.
		 */
		if (unlikely(skb->tail > skb->end) ||
		    unlikely((skb->data - skb->head) < 16)) {
			if (net_ratelimit()) {
				if (skb->tail > skb->end)
					printk(KERN_INFO "Received packet "
					       "is %zd bytes beyond tail.\n",
					       skb->tail - skb->end);
				else
					printk(KERN_INFO "Received packet "
					       "is %zd bytes before head.\n",
					       16 - (skb->data - skb->head));
			}

			nskb = __dev_alloc_skb(skb->len + 2,
					       GFP_ATOMIC|__GFP_NOWARN);
			if (nskb != NULL) {
				skb_reserve(nskb, 2);
				skb_put(nskb, skb->len);
				memcpy(nskb->data, skb->data, skb->len);
				nskb->dev = skb->dev;
				nskb->ip_summed = skb->ip_summed;
			}

			/* Reinitialise and then destroy the old skbuff. */
			skb->len  = 0;
			skb->tail = skb->data;
			init_skb_shinfo(skb);
			dev_kfree_skb(skb);

			/* Switch old for new, if we copied the buffer. */
			if ((skb = nskb) == NULL)
				continue;
		}

		/* Set the shinfo area, which is hidden behind the data. */
		init_skb_shinfo(skb);
		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, dev);

		/* Pass it up. */
		netif_receive_skb(skb);
		dev->last_rx = jiffies;
	}

	np->rx.rsp_cons = i;

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	network_alloc_rx_buffers(dev);

	*pbudget   -= work_done;
	dev->quota -= work_done;

	if (work_done < budget) {
		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__netif_rx_complete(dev);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return more_to_do;
}
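
/*
 * Illustrative note (not in the original source): the receive fill target
 * grows exponentially (doubled in network_alloc_rx_buffers() whenever the
 * ring runs low) and shrinks linearly (decremented once per poll that
 * finds the ring still mostly full).  For instance, with rx_target == 64,
 * a near-empty ring doubles it to 128, while a run of quiet polls walks
 * it back down one buffer at a time towards rx_min_target.
 */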

static int network_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	np->user_state = UST_CLOSED;
	netif_stop_queue(np->netdev);
	return 0;
}


static struct net_device_stats *network_get_stats(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	return &np->stats;
}

static void network_connect(struct net_device *dev)
{
	struct netfront_info *np;
	int i, requeue_idx;
	netif_tx_request_t *tx;
	struct sk_buff *skb;

	np = netdev_priv(dev);
	spin_lock_irq(&np->tx_lock);
	spin_lock(&np->rx_lock);

	/* Recovery procedure: */

	/* Step 1: Reinitialise variables. */
	np->tx_full = 0;

	/*
	 * Step 2: Rebuild the RX and TX ring contents.
	 * NB. We could just free the queued TX packets now but we hope
	 * that sending them out might do some good.  We have to rebuild
	 * the RX ring because some of our pages are currently flipped out
	 * so we can't just free the RX skbs.
	 * NB2. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */

	/*
	 * Rebuild the TX buffer freelist and the TX ring itself.
	 * NB. This reorders packets.  We could keep more private state
	 * to avoid this but maybe it doesn't matter so much given the
	 * interface has been down.
	 */
	for (requeue_idx = 0, i = 1; i <= NET_TX_RING_SIZE; i++) {
		if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET)
			continue;

		skb = np->tx_skbs[i];

		tx = RING_GET_REQUEST(&np->tx, requeue_idx);
		requeue_idx++;

		tx->id = i;
		gnttab_grant_foreign_access_ref(
			np->grant_tx_ref[i], np->xbdev->otherend_id,
			virt_to_mfn(np->tx_skbs[i]->data),
			GNTMAP_readonly);
		tx->gref = np->grant_tx_ref[i];
		tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
		tx->size = skb->len;
		tx->flags = 0;
		if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
			tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
		if (skb->proto_data_valid) /* remote but checksummed? */
			tx->flags |= NETTXF_data_validated;

		np->stats.tx_bytes += skb->len;
		np->stats.tx_packets++;
	}

	np->tx.req_prod_pvt = requeue_idx;
	RING_PUSH_REQUESTS(&np->tx);

	/* Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 1; i <= NET_RX_RING_SIZE; i++) {
		if ((unsigned long)np->rx_skbs[i] < PAGE_OFFSET)
			continue;
		gnttab_grant_foreign_transfer_ref(
			np->grant_rx_ref[i], np->xbdev->otherend_id,
			__pa(np->rx_skbs[i]->data) >> PAGE_SHIFT);
		RING_GET_REQUEST(&np->rx, requeue_idx)->gref =
			np->grant_rx_ref[i];
		RING_GET_REQUEST(&np->rx, requeue_idx)->id = i;
		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;
	RING_PUSH_REQUESTS(&np->rx);

	/*
	 * Step 3: All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	np->backend_state = BEST_CONNECTED;
	notify_remote_via_irq(np->irq);
	network_tx_buf_gc(dev);

	if (np->user_state == UST_OPEN)
		netif_start_queue(dev);

	spin_unlock(&np->rx_lock);
	spin_unlock_irq(&np->tx_lock);
}

static void show_device(struct netfront_info *np)
{
#ifdef DEBUG
	if (np) {
		IPRINTK("<vif handle=%u %s(%s) evtchn=%u tx=%p rx=%p>\n",
			np->handle,
			be_state_name[np->backend_state],
			np->user_state ? "open" : "closed",
			np->evtchn,
			&np->tx,
			&np->rx);
	} else
		IPRINTK("<vif NULL>\n");
#endif
}

static void netif_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}

static struct ethtool_ops network_ethtool_ops =
{
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
};

/**
 * Create a network device.
 * @param handle device handle
 * @param val return parameter for created device
 * @return 0 on success, error code otherwise
 */
static int create_netdev(int handle, struct xenbus_device *dev,
			 struct net_device **val)
{
	int i, err = 0;
	struct net_device *netdev = NULL;
	struct netfront_info *np = NULL;

	if ((netdev = alloc_etherdev(sizeof(struct netfront_info))) == NULL) {
		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
		       __FUNCTION__);
		err = -ENOMEM;
		goto exit;
	}

	np = netdev_priv(netdev);
	np->backend_state = BEST_CLOSED;
	np->user_state    = UST_CLOSED;
	np->handle        = handle;
	np->xbdev         = dev;

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target     = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;

	/* Initialise {tx,rx}_skbs as a free chain containing every entry. */
	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
		np->tx_skbs[i] = (void *)((unsigned long) i+1);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}

	for (i = 0; i <= NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = (void *)((unsigned long) i+1);
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot. */
	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
					  &np->gref_tx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}
	/* A grant for every rx ring slot. */
	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
					  &np->gref_rx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
		gnttab_free_grant_references(np->gref_tx_head);
		err = -ENOMEM;
		goto exit;
	}

	netdev->open            = network_open;
	netdev->hard_start_xmit = network_start_xmit;
	netdev->stop            = network_close;
	netdev->get_stats       = network_get_stats;
	netdev->poll            = netif_poll;
	netdev->uninit          = netif_uninit;
	netdev->weight          = 64;
	netdev->features        = NETIF_F_IP_CSUM;

	SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &dev->dev);

	if ((err = register_netdev(netdev)) != 0) {
		printk(KERN_WARNING "%s> register_netdev err=%d\n",
		       __FUNCTION__, err);
		goto exit_free_grefs;
	}

	if ((err = xennet_proc_addif(netdev)) != 0) {
		unregister_netdev(netdev);
		goto exit_free_grefs;
	}

	np->netdev = netdev;

 exit:
	if (err != 0)
		kfree(netdev);
	else if (val != NULL)
		*val = netdev;
	return err;

 exit_free_grefs:
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
	goto exit;
}

/*
 * We use this notifier to send out a fake ARP reply to reset switches and
 * router ARP caches when an IP interface is brought up on a VIF.
 */
static int
inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;

	/* UP event and is it one of our devices? */
	if (event == NETDEV_UP && dev->open == network_open)
		(void)send_fake_arp(dev);

	return NOTIFY_DONE;
}


/* ** Close down ** */


/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void netfront_closing(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->data;

	DPRINTK("netfront_closing: %s removed\n", dev->nodename);

	close_netdev(info);

	xenbus_switch_state(dev, XenbusStateClosed);
}


static int netfront_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->data;

	DPRINTK("%s\n", dev->nodename);

	netif_disconnect_backend(info);
	free_netdev(info->netdev);

	return 0;
}


static void close_netdev(struct netfront_info *info)
{
	spin_lock_irq(&info->netdev->xmit_lock);
	netif_stop_queue(info->netdev);
	spin_unlock_irq(&info->netdev->xmit_lock);

#ifdef CONFIG_PROC_FS
	xennet_proc_delif(info->netdev);
#endif

	del_timer_sync(&info->rx_refill_timer);

	unregister_netdev(info->netdev);
}


static void netif_disconnect_backend(struct netfront_info *info)
{
	/* Stop old i/f to prevent errors whilst we rebuild the state. */
	spin_lock_irq(&info->tx_lock);
	spin_lock(&info->rx_lock);
	info->backend_state = BEST_DISCONNECTED;
	spin_unlock(&info->rx_lock);
	spin_unlock_irq(&info->tx_lock);

	if (info->irq)
		unbind_from_irqhandler(info->irq, info->netdev);
	info->evtchn = info->irq = 0;

	end_access(info->tx_ring_ref, info->tx.sring);
	end_access(info->rx_ring_ref, info->rx.sring);
	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;
}


static void netif_free(struct netfront_info *info)
{
	close_netdev(info);
	netif_disconnect_backend(info);
	free_netdev(info->netdev);
}


static void end_access(int ref, void *page)
{
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}


/* ** Driver registration ** */


static struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};


static struct xenbus_driver netfront = {
	.name = "vif",
	.owner = THIS_MODULE,
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = netfront_remove,
	.resume = netfront_resume,
	.otherend_changed = backend_changed,
};


static struct notifier_block notifier_inetdev = {
	.notifier_call = inetdev_notify,
	.next          = NULL,
	.priority      = 0
};

static int __init netif_init(void)
{
	int err = 0;

	if (xen_start_info->flags & SIF_INITDOMAIN)
		return 0;

	if ((err = xennet_proc_init()) != 0)
		return err;

	IPRINTK("Initialising virtual ethernet driver.\n");

	(void)register_inetaddr_notifier(&notifier_inetdev);

	return xenbus_register_frontend(&netfront);
}
module_init(netif_init);


static void netif_exit(void)
{
	unregister_inetaddr_notifier(&notifier_inetdev);

	return xenbus_unregister_driver(&netfront);
}
module_exit(netif_exit);

MODULE_LICENSE("Dual BSD/GPL");


/* ** /proc ** */


#ifdef CONFIG_PROC_FS

#define TARGET_MIN 0UL
#define TARGET_MAX 1UL
#define TARGET_CUR 2UL

static int xennet_proc_read(
	char *page, char **start, off_t off, int count, int *eof, void *data)
{
	struct net_device *dev =
		(struct net_device *)((unsigned long)data & ~3UL);
	struct netfront_info *np = netdev_priv(dev);
	int len = 0, which_target = (long)data & 3;

	switch (which_target) {
	case TARGET_MIN:
		len = sprintf(page, "%d\n", np->rx_min_target);
		break;
	case TARGET_MAX:
		len = sprintf(page, "%d\n", np->rx_max_target);
		break;
	case TARGET_CUR:
		len = sprintf(page, "%d\n", np->rx_target);
		break;
	}

	*eof = 1;
	return len;
}

static int xennet_proc_write(
	struct file *file, const char __user *buffer,
	unsigned long count, void *data)
{
	struct net_device *dev =
		(struct net_device *)((unsigned long)data & ~3UL);
	struct netfront_info *np = netdev_priv(dev);
	int which_target = (long)data & 3;
	char string[64];
	long target;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (count <= 1)
		return -EBADMSG; /* runt */
	if (count > sizeof(string))
		return -EFBIG;   /* too long */

	if (copy_from_user(string, buffer, count))
		return -EFAULT;
	string[sizeof(string)-1] = '\0';

	target = simple_strtol(string, NULL, 10);
	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock(&np->rx_lock);

	switch (which_target) {
	case TARGET_MIN:
		if (target > np->rx_max_target)
			np->rx_max_target = target;
		np->rx_min_target = target;
		if (target > np->rx_target)
			np->rx_target = target;
		break;
	case TARGET_MAX:
		if (target < np->rx_min_target)
			np->rx_min_target = target;
		np->rx_max_target = target;
		if (target < np->rx_target)
			np->rx_target = target;
		break;
	case TARGET_CUR:
		break;
	}

	network_alloc_rx_buffers(dev);

	spin_unlock(&np->rx_lock);

	return count;
}

static int xennet_proc_init(void)
{
	if (proc_mkdir("xen/net", NULL) == NULL)
		return -ENOMEM;
	return 0;
}

static int xennet_proc_addif(struct net_device *dev)
{
	struct proc_dir_entry *dir, *min, *max, *cur;
	char name[30];

	sprintf(name, "xen/net/%s", dev->name);

	dir = proc_mkdir(name, NULL);
	if (!dir)
		goto nomem;

	min = create_proc_entry("rxbuf_min", 0644, dir);
	max = create_proc_entry("rxbuf_max", 0644, dir);
	cur = create_proc_entry("rxbuf_cur", 0444, dir);
	if (!min || !max || !cur)
		goto nomem;

	min->read_proc  = xennet_proc_read;
	min->write_proc = xennet_proc_write;
	min->data       = (void *)((unsigned long)dev | TARGET_MIN);

	max->read_proc  = xennet_proc_read;
	max->write_proc = xennet_proc_write;
	max->data       = (void *)((unsigned long)dev | TARGET_MAX);

	cur->read_proc  = xennet_proc_read;
	cur->write_proc = xennet_proc_write;
	cur->data       = (void *)((unsigned long)dev | TARGET_CUR);

	return 0;

 nomem:
	xennet_proc_delif(dev);
	return -ENOMEM;
}

static void xennet_proc_delif(struct net_device *dev)
{
	char name[30];

	sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
	remove_proc_entry(name, NULL);

	sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
	remove_proc_entry(name, NULL);

	sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
	remove_proc_entry(name, NULL);

	sprintf(name, "xen/net/%s", dev->name);
	remove_proc_entry(name, NULL);
}

#endif
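
/*
 * Illustrative usage (not part of the original file), assuming a guest
 * with a vif named eth0:
 *
 *	cat /proc/xen/net/eth0/rxbuf_cur          # current fill target
 *	echo 128 > /proc/xen/net/eth0/rxbuf_min   # raise the floor
 *
 * Writes are clamped to [RX_MIN_TARGET, RX_MAX_TARGET] and trigger an
 * immediate refill via network_alloc_rx_buffers().
 */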

/*
 * Local variables:
 *  c-file-style: "linux"
 *  indent-tabs-mode: t
 *  c-indent-level: 8
 *  c-basic-offset: 8
 *  tab-width: 8
 * End:
 */