ia64/linux-2.6.18-xen.hg

drivers/xen/sfc_netback/accel_xenbus.c @ 847:ad4d307bf9ce

net sfc: Update sfc and sfc_resource driver to latest release

...and update sfc_netfront, sfc_netback, sfc_netutil for any API changes

sfc_netback: Fix asymmetric use of SFC buffer table alloc and free
sfc_netback: Clean up if no SFC accel device found
sfc_netback: Gracefully handle case where page grant fails
sfc_netback: Disable net acceleration if the physical link goes down
sfc_netfront: Less verbose error messages, more verbose counters for rx discard errors
sfc_netfront: Gracefully handle case where SFC netfront fails during initialisation

Signed-off-by: Kieran Mansley <kmansley@solarflare.com>
Author: Keir Fraser <keir.fraser@citrix.com>
Date:   Tue Mar 31 11:59:10 2009 +0100

/****************************************************************************
 * Solarflare driver for Xen network acceleration
 *
 * Copyright 2006-2008: Solarflare Communications Inc,
 *                      9501 Jeronimo Road, Suite 250,
 *                      Irvine, CA 92618, USA
 *
 * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 ****************************************************************************
 */

#include <xen/evtchn.h>
#include <linux/mutex.h>

/* drivers/xen/netback/common.h */
#include "common.h"

#include "accel.h"
#include "accel_solarflare.h"
#include "accel_util.h"

#define NODENAME_PATH_FMT "backend/vif/%d/%d"
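
/*
 * Fetch the acceleration private state that netback hangs off a
 * xenbus device via its backend_info.
 */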
#define NETBACK_ACCEL_FROM_XENBUS_DEVICE(_dev) (struct netback_accel *) \
	((struct backend_info *)(_dev)->dev.driver_data)->netback_accel_priv

/* List of all the bends currently in existence. */
struct netback_accel *bend_list = NULL;
DEFINE_MUTEX(bend_list_mutex);

/* Put in bend_list. Must hold bend_list_mutex */
static void link_bend(struct netback_accel *bend)
{
	bend->next_bend = bend_list;
	bend_list = bend;
}

/* Remove from bend_list. Must hold bend_list_mutex */
static void unlink_bend(struct netback_accel *bend)
{
	struct netback_accel *tmp = bend_list;
	struct netback_accel *prev = NULL;
	while (tmp != NULL) {
		if (tmp == bend) {
			if (prev != NULL)
				prev->next_bend = bend->next_bend;
			else
				bend_list = bend->next_bend;
			return;
		}
		prev = tmp;
		tmp = tmp->next_bend;
	}
}


/* Demultiplex a message IRQ from the frontend driver. */
static irqreturn_t msgirq_from_frontend(int irq, void *context,
					struct pt_regs *unused)
{
	struct xenbus_device *dev = context;
	struct netback_accel *bend = NETBACK_ACCEL_FROM_XENBUS_DEVICE(dev);
	VPRINTK("irq %d from device %s\n", irq, dev->nodename);
	schedule_work(&bend->handle_msg);
	return IRQ_HANDLED;
}


/*
 * Demultiplex an IRQ from the frontend driver.  This is never used
 * functionally, but we need a handler to pass to the bind function,
 * and it may be called spuriously.
 */
static irqreturn_t netirq_from_frontend(int irq, void *context,
					struct pt_regs *unused)
{
	VPRINTK("netirq %d from device %s\n", irq,
		((struct xenbus_device *)context)->nodename);

	return IRQ_HANDLED;
}


/* Read the limits values from the device's xenbus node. */
static
void cfg_hw_quotas(struct xenbus_device *dev, struct netback_accel *bend)
{
	int err = xenbus_gather
		(XBT_NIL, dev->nodename,
		 "limits/max-filters", "%d", &bend->quotas.max_filters,
		 "limits/max-buf-pages", "%d", &bend->quotas.max_buf_pages,
		 "limits/max-mcasts", "%d", &bend->quotas.max_mcasts,
		 NULL);
	if (err) {
		/*
		 * TODO what if they have previously been set by the
		 * user?  This will overwrite with defaults.  Maybe
		 * not what we want to do, but useful in startup
		 * case
		 */
		DPRINTK("Failed to read quotas from xenbus, using defaults\n");
		bend->quotas.max_filters = NETBACK_ACCEL_DEFAULT_MAX_FILTERS;
		bend->quotas.max_buf_pages = sfc_netback_max_pages;
		bend->quotas.max_mcasts = NETBACK_ACCEL_DEFAULT_MAX_MCASTS;
	}

	return;
}


static void bend_config_accel_change(struct xenbus_watch *watch,
				     const char **vec, unsigned int len)
{
	struct netback_accel *bend;

	bend = container_of(watch, struct netback_accel, config_accel_watch);

	mutex_lock(&bend->bend_mutex);
	if (bend->config_accel_watch.node != NULL) {
		struct xenbus_device *dev =
			(struct xenbus_device *)bend->hdev_data;
		DPRINTK("Watch matched, got dev %p otherend %p\n",
			dev, dev->otherend);
		if (!xenbus_exists(XBT_NIL, watch->node, "")) {
			DPRINTK("Ignoring watch as otherend seems invalid\n");
			goto out;
		}

		cfg_hw_quotas(dev, bend);
	}
out:
	mutex_unlock(&bend->bend_mutex);
	return;
}


/*
 * Setup watch on "limits" in the backend vif info to know when
 * configuration has been set
 */
static int setup_config_accel_watch(struct xenbus_device *dev,
				    struct netback_accel *bend)
{
	int err;

	VPRINTK("Setting watch on %s/%s\n", dev->nodename, "limits");

	err = xenbus_watch_path2(dev, dev->nodename, "limits",
				 &bend->config_accel_watch,
				 bend_config_accel_change);

	if (err) {
		EPRINTK("%s: Failed to register xenbus watch: %d\n",
			__FUNCTION__, err);
		bend->config_accel_watch.node = NULL;
		return err;
	}
	return 0;
}
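

/*
 * Read the frontend's event channel numbers and the grant references
 * for the control and message pages from its xenbus directory.
 */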
static int
cfg_frontend_info(struct xenbus_device *dev, struct netback_accel *bend,
		  int *grants)
{
	/* Get some info from xenbus on the event channel and shmem grant */
	int err = xenbus_gather(XBT_NIL, dev->otherend,
				"accel-msg-channel", "%u", &bend->msg_channel,
				"accel-ctrl-page", "%d", &(grants[0]),
				"accel-msg-page", "%d", &(grants[1]),
				"accel-net-channel", "%u", &bend->net_channel,
				NULL);
	if (err)
		EPRINTK("failed to read event channels or shmem grant: %d\n",
			err);
	else
		DPRINTK("got event chan %d and net chan %d from frontend\n",
			bend->msg_channel, bend->net_channel);
	return err;
}


/* Setup all the comms needed to chat with the front end driver */
static int setup_vnic(struct xenbus_device *dev)
{
	struct netback_accel *bend;
	int grants[2], err, msgs_per_queue;

	bend = NETBACK_ACCEL_FROM_XENBUS_DEVICE(dev);

	err = cfg_frontend_info(dev, bend, grants);
	if (err)
		goto fail1;

	/*
	 * If we get here, the frontend is Connected and the
	 * configuration options are available.  All is well.
	 */

	/* Get the hardware quotas for the VNIC in question. */
	cfg_hw_quotas(dev, bend);

	/* Set up the deferred work handlers */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
	INIT_WORK(&bend->handle_msg,
		  netback_accel_msg_rx_handler);
#else
	INIT_WORK(&bend->handle_msg,
		  netback_accel_msg_rx_handler,
		  (void*)bend);
#endif

	/* Request the frontend mac */
	err = net_accel_xen_net_read_mac(dev, bend->mac);
	if (err)
		goto fail2;

	/* Set up the shared page. */
	bend->shared_page = net_accel_map_grants_contig(dev, grants, 2,
							&bend->sh_pages_unmap);

	if (bend->shared_page == NULL) {
		EPRINTK("failed to map shared page for %s\n", dev->otherend);
		err = -ENOMEM;
		goto fail2;
	}

	/* Initialise the shared page(s) used for comms */
	net_accel_msg_init_page(bend->shared_page, PAGE_SIZE,
				(bend->net_dev->flags & IFF_UP) &&
				(netif_carrier_ok(bend->net_dev)));

	msgs_per_queue = (PAGE_SIZE/2) / sizeof(struct net_accel_msg);
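
	/*
	 * Each direction gets half of the second mapped page for its
	 * message array: to_domU starts at offset PAGE_SIZE, from_domU
	 * at 3*PAGE_SIZE/2, msgs_per_queue entries each.
	 */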
	net_accel_msg_init_queue
		(&bend->to_domU, &bend->shared_page->queue0,
		 (struct net_accel_msg *)((__u8*)bend->shared_page + PAGE_SIZE),
		 msgs_per_queue);

	net_accel_msg_init_queue
		(&bend->from_domU, &bend->shared_page->queue1,
		 (struct net_accel_msg *)((__u8*)bend->shared_page +
					  (3 * PAGE_SIZE / 2)),
		 msgs_per_queue);

	/* Bind the message event channel to a handler
	 *
	 * Note that we will probably get a spurious interrupt when we
	 * do this, so it must not be done until we have set up
	 * everything we need to handle it.
	 */
	err = bind_interdomain_evtchn_to_irqhandler(dev->otherend_id,
						    bend->msg_channel,
						    msgirq_from_frontend,
						    0,
						    "netback_accel",
						    dev);
	if (err < 0) {
		EPRINTK("failed to bind event channel: %d\n", err);
		goto fail3;
	}
	else
		bend->msg_channel_irq = err;

	/* TODO: No need to bind this evtchn to an irq. */
	err = bind_interdomain_evtchn_to_irqhandler(dev->otherend_id,
						    bend->net_channel,
						    netirq_from_frontend,
						    0,
						    "netback_accel",
						    dev);
	if (err < 0) {
		EPRINTK("failed to bind net channel: %d\n", err);
		goto fail4;
	}
	else
		bend->net_channel_irq = err;

	/*
	 * Grab ourselves an entry in the forwarding hash table.  We do
	 * this now so we don't have the embarrassment of sorting out
	 * an allocation failure while in IRQ context.  Because we pass
	 * NULL as the context, the actual hash lookup will succeed for
	 * this NIC, but the check for somewhere to forward to will
	 * fail.  This is necessary to prevent forwarding before
	 * hardware resources are set up
	 */
	err = netback_accel_fwd_add(bend->mac, NULL, bend->fwd_priv);
	if (err) {
		EPRINTK("failed to add to fwd hash table\n");
		goto fail5;
	}

	/*
	 * Say hello to frontend.  Important to do this straight after
	 * obtaining the message queue as otherwise we are vulnerable
	 * to an evil frontend sending a HELLO-REPLY before we've sent
	 * the HELLO and confusing us
	 */
	netback_accel_msg_tx_hello(bend, NET_ACCEL_MSG_VERSION);
	return 0;

fail5:
	unbind_from_irqhandler(bend->net_channel_irq, dev);
fail4:
	unbind_from_irqhandler(bend->msg_channel_irq, dev);
fail3:
	net_accel_unmap_grants_contig(dev, bend->sh_pages_unmap);
	bend->shared_page = NULL;
	bend->sh_pages_unmap = NULL;
fail2:
fail1:
	return err;
}
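

/*
 * Read the name of the NIC to use for acceleration from the "accel"
 * key of the backend's xenbus directory.
 */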
static int read_nicname(struct xenbus_device *dev, struct netback_accel *bend)
{
	int len;

	/* nic name used to select interface used for acceleration */
	bend->nicname = xenbus_read(XBT_NIL, dev->nodename, "accel", &len);
	if (IS_ERR(bend->nicname))
		return PTR_ERR(bend->nicname);

	return 0;
}

static const char *frontend_name = "sfc_netfront";

static int publish_frontend_name(struct xenbus_device *dev)
{
	struct xenbus_transaction tr;
	int err;

	/* Publish the name of the frontend driver */
	do {
		err = xenbus_transaction_start(&tr);
		if (err != 0) {
			EPRINTK("%s: transaction start failed\n", __FUNCTION__);
			return err;
		}
		err = xenbus_printf(tr, dev->nodename, "accel-frontend",
				    "%s", frontend_name);
		if (err != 0) {
			EPRINTK("%s: xenbus_printf failed\n", __FUNCTION__);
			xenbus_transaction_end(tr, 1);
			return err;
		}
		err = xenbus_transaction_end(tr, 0);
	} while (err == -EAGAIN);

	if (err != 0) {
		EPRINTK("failed to end frontend name transaction\n");
		return err;
	}
	return 0;
}
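

/* Remove the advertised frontend driver name from xenbus again. */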
static int unpublish_frontend_name(struct xenbus_device *dev)
{
	struct xenbus_transaction tr;
	int err;

	do {
		err = xenbus_transaction_start(&tr);
		if (err != 0)
			break;
		err = xenbus_rm(tr, dev->nodename, "accel-frontend");
		if (err != 0) {
			xenbus_transaction_end(tr, 1);
			break;
		}
		err = xenbus_transaction_end(tr, 0);
	} while (err == -EAGAIN);

	return err;
}
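

/*
 * Undo setup_vnic(): remove the forwarding table entry, free buffer
 * table allocations, release hardware resources, unbind the event
 * channel IRQs and unmap the shared pages.
 */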
static void cleanup_vnic(struct netback_accel *bend)
{
	struct xenbus_device *dev;

	dev = (struct xenbus_device *)bend->hdev_data;

	DPRINTK("%s: bend %p dev %p\n", __FUNCTION__, bend, dev);

	DPRINTK("%s: Remove %p's mac from fwd table...\n",
		__FUNCTION__, bend);
	netback_accel_fwd_remove(bend->mac, bend->fwd_priv);

	/* Free buffer table allocations */
	netback_accel_remove_buffers(bend);

	DPRINTK("%s: Release hardware resources...\n", __FUNCTION__);
	if (bend->accel_shutdown)
		bend->accel_shutdown(bend);

	if (bend->net_channel_irq) {
		unbind_from_irqhandler(bend->net_channel_irq, dev);
		bend->net_channel_irq = 0;
	}

	if (bend->msg_channel_irq) {
		unbind_from_irqhandler(bend->msg_channel_irq, dev);
		bend->msg_channel_irq = 0;
	}

	if (bend->sh_pages_unmap) {
		DPRINTK("%s: Unmap grants %p\n", __FUNCTION__,
			bend->sh_pages_unmap);
		net_accel_unmap_grants_contig(dev, bend->sh_pages_unmap);
		bend->sh_pages_unmap = NULL;
		bend->shared_page = NULL;
	}
}


/*************************************************************************/

/*
 * The following code handles accelstate changes between the frontend
 * and the backend.  It calls setup_vnic and cleanup_vnic in matching
 * pairs in response to transitions.
 *
 * Valid state transitions for Dom0 are as follows:
 *
 * Closed->Init       on probe or in response to Init from domU
 * Closed->Closing    on error/remove
 *
 * Init->Connected    in response to Connected from domU
 * Init->Closing      on error/remove or in response to Closing from domU
 *
 * Connected->Closing on error/remove or in response to Closing from domU
 *
 * Closing->Closed    in response to Closed from domU
 *
 */


static void netback_accel_frontend_changed(struct xenbus_device *dev,
					   XenbusState frontend_state)
{
	struct netback_accel *bend = NETBACK_ACCEL_FROM_XENBUS_DEVICE(dev);
	XenbusState backend_state;

	DPRINTK("%s: changing from %s to %s. nodename %s, otherend %s\n",
		__FUNCTION__, xenbus_strstate(bend->frontend_state),
		xenbus_strstate(frontend_state), dev->nodename, dev->otherend);

	/*
	 * Ignore duplicate state changes.  This can happen if the
	 * frontend changes state twice in quick succession and the
	 * first watch fires in the backend after the second
	 * transition has completed.
	 */
	if (bend->frontend_state == frontend_state)
		return;

	bend->frontend_state = frontend_state;
	backend_state = bend->backend_state;

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (backend_state == XenbusStateClosed &&
		    !bend->removing)
			backend_state = XenbusStateInitialising;
		break;

	case XenbusStateConnected:
		if (backend_state == XenbusStateInitialising) {
			if (!bend->vnic_is_setup &&
			    setup_vnic(dev) == 0) {
				bend->vnic_is_setup = 1;
				backend_state = XenbusStateConnected;
			} else {
				backend_state = XenbusStateClosing;
			}
		}
		break;

	case XenbusStateInitWait:
	case XenbusStateInitialised:
	default:
		DPRINTK("Unknown state %s (%d) from frontend.\n",
			xenbus_strstate(frontend_state), frontend_state);
		/* Unknown state.  Fall through. */
	case XenbusStateClosing:
		if (backend_state != XenbusStateClosed)
			backend_state = XenbusStateClosing;

		/*
		 * The bend will now persist (with watches active) in
		 * case the frontend comes back again, eg. after
		 * frontend module reload or suspend/resume
		 */

		break;

	case XenbusStateUnknown:
	case XenbusStateClosed:
		if (bend->vnic_is_setup) {
			bend->vnic_is_setup = 0;
			cleanup_vnic(bend);
		}

		if (backend_state == XenbusStateClosing)
			backend_state = XenbusStateClosed;
		break;
	}

	if (backend_state != bend->backend_state) {
		DPRINTK("Switching from state %s (%d) to %s (%d)\n",
			xenbus_strstate(bend->backend_state),
			bend->backend_state,
			xenbus_strstate(backend_state), backend_state);
		bend->backend_state = backend_state;
		net_accel_update_state(dev, backend_state);
	}

	wake_up(&bend->state_wait_queue);
}


/* accelstate on the frontend's xenbus node has changed */
static void bend_domu_accel_change(struct xenbus_watch *watch,
				   const char **vec, unsigned int len)
{
	int state;
	struct netback_accel *bend;

	bend = container_of(watch, struct netback_accel, domu_accel_watch);
	if (bend->domu_accel_watch.node != NULL) {
		struct xenbus_device *dev =
			(struct xenbus_device *)bend->hdev_data;
		VPRINTK("Watch matched, got dev %p otherend %p\n",
			dev, dev->otherend);
		/*
		 * dev->otherend != NULL check to protect against
		 * watch firing when domain goes away and we haven't
		 * yet cleaned up
		 */
		if (!dev->otherend ||
		    !xenbus_exists(XBT_NIL, watch->node, "") ||
		    strncmp(dev->otherend, vec[XS_WATCH_PATH],
			    strlen(dev->otherend))) {
			DPRINTK("Ignoring watch as otherend seems invalid\n");
			return;
		}

		mutex_lock(&bend->bend_mutex);

		xenbus_scanf(XBT_NIL, dev->otherend, "accelstate", "%d",
			     &state);
		netback_accel_frontend_changed(dev, state);

		mutex_unlock(&bend->bend_mutex);
	}
}

/* Setup watch on frontend's accelstate */
static int setup_domu_accel_watch(struct xenbus_device *dev,
				  struct netback_accel *bend)
{
	int err;

	VPRINTK("Setting watch on %s/%s\n", dev->otherend, "accelstate");

	err = xenbus_watch_path2(dev, dev->otherend, "accelstate",
				 &bend->domu_accel_watch,
				 bend_domu_accel_change);
	if (err) {
		EPRINTK("%s: Failed to register xenbus watch: %d\n",
			__FUNCTION__, err);
		goto fail;
	}
	return 0;
fail:
	bend->domu_accel_watch.node = NULL;
	return err;
}
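

/*
 * Probe entry point for a backend vif: allocate and initialise the
 * per-interface acceleration state, publish the frontend driver name
 * and set up the xenbus watches.
 */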
int netback_accel_probe(struct xenbus_device *dev)
{
	struct netback_accel *bend;
	struct backend_info *binfo;
	int err;

	DPRINTK("%s: passed device %s\n", __FUNCTION__, dev->nodename);

	/* Allocate structure to store all our state... */
	bend = kzalloc(sizeof(struct netback_accel), GFP_KERNEL);
	if (bend == NULL) {
		DPRINTK("%s: no memory for bend\n", __FUNCTION__);
		return -ENOMEM;
	}

	mutex_init(&bend->bend_mutex);

	mutex_lock(&bend->bend_mutex);

	/* ...and store it where we can get at it */
	binfo = (struct backend_info *) dev->dev.driver_data;
	binfo->netback_accel_priv = bend;
	/* And vice-versa */
	bend->hdev_data = dev;

	DPRINTK("%s: Adding bend %p to list\n", __FUNCTION__, bend);

	init_waitqueue_head(&bend->state_wait_queue);
	bend->vnic_is_setup = 0;
	bend->frontend_state = XenbusStateUnknown;
	bend->backend_state = XenbusStateClosed;
	bend->removing = 0;

	sscanf(dev->nodename, NODENAME_PATH_FMT, &bend->far_end,
	       &bend->vif_num);

	err = read_nicname(dev, bend);
	if (err) {
		/*
		 * Technically not an error, just means we're not
		 * supposed to accelerate this
		 */
		DPRINTK("failed to get device name\n");
		goto fail_nicname;
	}

	/*
	 * Look up the device name in the list of NICs provided by
	 * driverlink to get the hardware type.
	 */
	err = netback_accel_sf_hwtype(bend);
	if (err) {
		/*
		 * Technically not an error, just means we're not
		 * supposed to accelerate this, probably belongs to
		 * some other backend
		 */
		DPRINTK("failed to match device name\n");
		goto fail_init_type;
	}

	err = publish_frontend_name(dev);
	if (err)
		goto fail_publish;

	err = netback_accel_debugfs_create(bend);
	if (err)
		goto fail_debugfs;

	mutex_unlock(&bend->bend_mutex);

	err = setup_config_accel_watch(dev, bend);
	if (err)
		goto fail_config_watch;

	err = setup_domu_accel_watch(dev, bend);
	if (err)
		goto fail_domu_watch;

	/*
	 * Indicate to the other end that we're ready to start unless
	 * the watch has already fired.
	 */
	mutex_lock(&bend->bend_mutex);
	if (bend->backend_state == XenbusStateClosed) {
		bend->backend_state = XenbusStateInitialising;
		net_accel_update_state(dev, XenbusStateInitialising);
	}
	mutex_unlock(&bend->bend_mutex);

	mutex_lock(&bend_list_mutex);
	link_bend(bend);
	mutex_unlock(&bend_list_mutex);

	return 0;

fail_domu_watch:

	unregister_xenbus_watch(&bend->config_accel_watch);
	kfree(bend->config_accel_watch.node);
fail_config_watch:

	/*
	 * Flush the scheduled work queue before freeing bend to get
	 * rid of any pending netback_accel_msg_rx_handler()
	 */
	flush_scheduled_work();

	mutex_lock(&bend->bend_mutex);
	net_accel_update_state(dev, XenbusStateUnknown);
	netback_accel_debugfs_remove(bend);
fail_debugfs:

	unpublish_frontend_name(dev);
fail_publish:

	/* No need to reverse netback_accel_sf_hwtype. */
fail_init_type:

	kfree(bend->nicname);
fail_nicname:
	binfo->netback_accel_priv = NULL;
	mutex_unlock(&bend->bend_mutex);
	kfree(bend);
	return err;
}
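

/*
 * Remove entry point for a backend vif: tell the frontend we are
 * closing, wait for it to close where necessary, then tear down and
 * free all acceleration state for the interface.
 */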
int netback_accel_remove(struct xenbus_device *dev)
{
	struct backend_info *binfo;
	struct netback_accel *bend;
	int frontend_state;

	binfo = (struct backend_info *) dev->dev.driver_data;
	bend = (struct netback_accel *) binfo->netback_accel_priv;

	DPRINTK("%s: dev %p bend %p\n", __FUNCTION__, dev, bend);

	BUG_ON(bend == NULL);

	mutex_lock(&bend_list_mutex);
	unlink_bend(bend);
	mutex_unlock(&bend_list_mutex);

	mutex_lock(&bend->bend_mutex);

	/* Reject any requests to connect. */
	bend->removing = 1;

	/*
	 * Switch to closing to tell the other end that we're going
	 * away.
	 */
	if (bend->backend_state != XenbusStateClosing) {
		bend->backend_state = XenbusStateClosing;
		net_accel_update_state(dev, XenbusStateClosing);
	}

	frontend_state = (int)XenbusStateUnknown;
	xenbus_scanf(XBT_NIL, dev->otherend, "accelstate", "%d",
		     &frontend_state);

	mutex_unlock(&bend->bend_mutex);

	/*
	 * Wait until this end goes to the closed state.  This happens
	 * in response to the other end going to the closed state.
	 * Don't bother doing this if the other end is already closed
	 * because if it is then there is nothing to do.
	 */
	if (frontend_state != (int)XenbusStateClosed &&
	    frontend_state != (int)XenbusStateUnknown)
		wait_event(bend->state_wait_queue,
			   bend->backend_state == XenbusStateClosed);

	unregister_xenbus_watch(&bend->domu_accel_watch);
	kfree(bend->domu_accel_watch.node);

	unregister_xenbus_watch(&bend->config_accel_watch);
	kfree(bend->config_accel_watch.node);

	/*
	 * Flush the scheduled work queue before freeing bend to get
	 * rid of any pending netback_accel_msg_rx_handler()
	 */
	flush_scheduled_work();

	mutex_lock(&bend->bend_mutex);

	/* Tear down the vnic if it was set up. */
	if (bend->vnic_is_setup) {
		bend->vnic_is_setup = 0;
		cleanup_vnic(bend);
	}

	bend->backend_state = XenbusStateUnknown;
	net_accel_update_state(dev, XenbusStateUnknown);

	netback_accel_debugfs_remove(bend);

	unpublish_frontend_name(dev);

	kfree(bend->nicname);

	binfo->netback_accel_priv = NULL;

	mutex_unlock(&bend->bend_mutex);

	kfree(bend);

	return 0;
}
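

/* Sanity check at module unload: every bend should already have been removed. */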
void netback_accel_shutdown_bends(void)
{
	mutex_lock(&bend_list_mutex);
	/*
	 * I think we should have had a remove callback for all
	 * interfaces before being allowed to unload the module
	 */
	BUG_ON(bend_list != NULL);
	mutex_unlock(&bend_list_mutex);
}
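

/* Move this bend to the Closing state and publish the change on xenbus. */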
void netback_accel_set_closing(struct netback_accel *bend)
{

	bend->backend_state = XenbusStateClosing;
	net_accel_update_state((struct xenbus_device *)bend->hdev_data,
			       XenbusStateClosing);
}