ia64/linux-2.6.18-xen.hg

annotate drivers/xen/sfc_netfront/accel_msg.c @ 847:ad4d307bf9ce

net sfc: Update sfc and sfc_resource driver to latest release

...and update sfc_netfront, sfc_netback, sfc_netutil for any API changes

sfc_netback: Fix asymmetric use of SFC buffer table alloc and free
sfc_netback: Clean up if no SFC accel device found
sfc_netback: Gracefully handle case where page grant fails
sfc_netback: Disable net acceleration if the physical link goes down
sfc_netfront: Less verbose error messages, more verbose counters for rx discard errors
sfc_netfront: Gracefully handle case where SFC netfront fails during initialisation

Signed-off-by: Kieran Mansley <kmansley@solarflare.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Mar 31 11:59:10 2009 +0100 (2009-03-31)
parents 8c8a097cae69
children
rev   line source
keir@426 1 /****************************************************************************
keir@426 2 * Solarflare driver for Xen network acceleration
keir@426 3 *
keir@426 4 * Copyright 2006-2008: Solarflare Communications Inc,
keir@426 5 * 9501 Jeronimo Road, Suite 250,
keir@426 6 * Irvine, CA 92618, USA
keir@426 7 *
keir@426 8 * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
keir@426 9 *
keir@426 10 * This program is free software; you can redistribute it and/or modify it
keir@426 11 * under the terms of the GNU General Public License version 2 as published
keir@426 12 * by the Free Software Foundation, incorporated herein by reference.
keir@426 13 *
keir@426 14 * This program is distributed in the hope that it will be useful,
keir@426 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
keir@426 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
keir@426 17 * GNU General Public License for more details.
keir@426 18 *
keir@426 19 * You should have received a copy of the GNU General Public License
keir@426 20 * along with this program; if not, write to the Free Software
keir@426 21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
keir@426 22 ****************************************************************************
keir@426 23 */
keir@426 24
keir@426 25 #include <linux/stddef.h>
keir@426 26 #include <linux/errno.h>
keir@426 27
keir@426 28 #include <xen/xenbus.h>
keir@426 29
keir@426 30 #include "accel.h"
keir@426 31 #include "accel_msg_iface.h"
keir@426 32 #include "accel_util.h"
keir@426 33 #include "accel_bufs.h"
keir@426 34
keir@426 35 #include "netfront.h" /* drivers/xen/netfront/netfront.h */
keir@426 36
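/*
 * Prime event delivery from the hardware VI.  If the VI reports that
 * events are already outstanding, don't wait for an interrupt: mask
 * backend notifications again and schedule the poll routine directly.
 */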
keir@426 37 static void vnic_start_interrupts(netfront_accel_vnic *vnic)
keir@426 38 {
keir@426 39 unsigned long flags;
keir@426 40
keir@426 41 /* Prime our interrupt */
keir@426 42 spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
keir@426 43 if (!netfront_accel_vi_enable_interrupts(vnic)) {
keir@426 44 /* Cripes, that was quick, better pass it up */
keir@426 45 netfront_accel_disable_net_interrupts(vnic);
keir@426 46 vnic->irq_enabled = 0;
keir@426 47 NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_schedule_count++);
keir@426 48 netif_rx_schedule(vnic->net_dev);
keir@426 49 } else {
keir@426 50 /*
keir@426 51 * Nothing yet, make sure we get interrupts through
keir@426 52 * back end
keir@426 53 */
keir@426 54 vnic->irq_enabled = 1;
keir@426 55 netfront_accel_enable_net_interrupts(vnic);
keir@426 56 }
keir@426 57 spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
keir@426 58 }
keir@426 59
keir@426 60
keir@426 61 static void vnic_stop_interrupts(netfront_accel_vnic *vnic)
keir@426 62 {
keir@426 63 unsigned long flags;
keir@426 64
keir@426 65 spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
keir@426 66 netfront_accel_disable_net_interrupts(vnic);
keir@426 67 vnic->irq_enabled = 0;
keir@426 68 spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
keir@426 69 }
keir@426 70
keir@426 71
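/*
 * Bring the accelerated data path into use: allow transmits via the
 * VI, enable polling of it, and prime its interrupts.
 */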
keir@426 72 static void vnic_start_fastpath(netfront_accel_vnic *vnic)
keir@426 73 {
keir@426 74 struct net_device *net_dev = vnic->net_dev;
keir@426 75 unsigned long flags;
keir@426 76
keir@426 77 DPRINTK("%s\n", __FUNCTION__);
keir@426 78
keir@426 79 spin_lock_irqsave(&vnic->tx_lock, flags);
keir@426 80 vnic->tx_enabled = 1;
keir@426 81 spin_unlock_irqrestore(&vnic->tx_lock, flags);
keir@426 82
keir@426 83 netif_poll_disable(net_dev);
keir@426 84 vnic->poll_enabled = 1;
keir@426 85 netif_poll_enable(net_dev);
keir@426 86
keir@426 87 vnic_start_interrupts(vnic);
keir@426 88 }
keir@426 89
keir@426 90
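/*
 * Take the accelerated data path out of use: stop interrupts, disable
 * accelerated transmit (freeing any queued skb and waking the generic
 * queue if there is now room), and prevent further polling of the VI.
 */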
keir@426 91 void vnic_stop_fastpath(netfront_accel_vnic *vnic)
keir@426 92 {
keir@426 93 struct net_device *net_dev = vnic->net_dev;
keir@426 94 struct netfront_info *np = (struct netfront_info *)netdev_priv(net_dev);
keir@426 95 unsigned long flags1, flags2;
keir@426 96
keir@426 97 DPRINTK("%s\n", __FUNCTION__);
keir@426 98
keir@426 99 vnic_stop_interrupts(vnic);
keir@426 100
keir@426 101 spin_lock_irqsave(&vnic->tx_lock, flags1);
keir@426 102 vnic->tx_enabled = 0;
keir@426 103 spin_lock_irqsave(&np->tx_lock, flags2);
keir@426 104 if (vnic->tx_skb != NULL) {
keir@426 105 dev_kfree_skb_any(vnic->tx_skb);
keir@426 106 vnic->tx_skb = NULL;
keir@426 107 if (netfront_check_queue_ready(net_dev)) {
keir@426 108 netif_wake_queue(net_dev);
keir@426 109 NETFRONT_ACCEL_STATS_OP
keir@426 110 (vnic->stats.queue_wakes++);
keir@426 111 }
keir@426 112 }
keir@426 113 spin_unlock_irqrestore(&np->tx_lock, flags2);
keir@426 114 spin_unlock_irqrestore(&vnic->tx_lock, flags1);
keir@426 115
keir@426 116 /* Must prevent polls and hold lock to modify poll_enabled */
keir@426 117 netif_poll_disable(net_dev);
keir@426 118 spin_lock_irqsave(&vnic->irq_enabled_lock, flags1);
keir@426 119 vnic->poll_enabled = 0;
keir@426 120 spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags1);
keir@426 121 netif_poll_enable(net_dev);
keir@426 122 }
keir@426 123
keir@426 124
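/* Backend reports its network interface is up; start the fast path if
 * the frontend side is also ready. */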
keir@426 125 static void netfront_accel_interface_up(netfront_accel_vnic *vnic)
keir@426 126 {
keir@426 127 if (!vnic->backend_netdev_up) {
keir@426 128 vnic->backend_netdev_up = 1;
keir@426 129
keir@426 130 if (vnic->frontend_ready)
keir@426 131 vnic_start_fastpath(vnic);
keir@426 132 }
keir@426 133 }
keir@426 134
keir@426 135
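/* Backend reports its network interface is down; stop the fast path if
 * it was in use. */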
keir@426 136 static void netfront_accel_interface_down(netfront_accel_vnic *vnic)
keir@426 137 {
keir@426 138 if (vnic->backend_netdev_up) {
keir@426 139 vnic->backend_netdev_up = 0;
keir@426 140
keir@426 141 if (vnic->frontend_ready)
keir@426 142 vnic_stop_fastpath(vnic);
keir@426 143 }
keir@426 144 }
keir@426 145
keir@426 146
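/*
 * Handle a MAPBUF reply from dom0: add the newly granted pages to the
 * RX or TX buffer pool (chosen by the request offset) and, once every
 * requested page has been returned, declare the frontend ready and
 * start the fast path if the backend is already up.
 */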
keir@426 147 static int vnic_add_bufs(netfront_accel_vnic *vnic,
keir@426 148 struct net_accel_msg *msg)
keir@426 149 {
keir@426 150 int rc, offset;
keir@426 151 struct netfront_accel_bufinfo *bufinfo;
keir@426 152
keir@426 153 BUG_ON(msg->u.mapbufs.pages > NET_ACCEL_MSG_MAX_PAGE_REQ);
keir@426 154
keir@426 155 offset = msg->u.mapbufs.reqid;
keir@426 156
keir@426 157 if (offset < vnic->bufpages.max_pages -
keir@433 158 (vnic->bufpages.max_pages / sfc_netfront_buffer_split)) {
keir@426 159 bufinfo = vnic->rx_bufs;
keir@426 160 } else
keir@426 161 bufinfo = vnic->tx_bufs;
keir@426 162
keir@426 163 /* Queue up some Rx buffers to start things off. */
keir@426 164 if ((rc = netfront_accel_add_bufs(&vnic->bufpages, bufinfo, msg)) == 0) {
keir@426 165 netfront_accel_vi_add_bufs(vnic, bufinfo == vnic->rx_bufs);
keir@426 166
keir@426 167 if (offset + msg->u.mapbufs.pages == vnic->bufpages.max_pages) {
keir@426 168 VPRINTK("%s: got all buffers back\n", __FUNCTION__);
keir@426 169 vnic->frontend_ready = 1;
keir@426 170 if (vnic->backend_netdev_up)
keir@426 171 vnic_start_fastpath(vnic);
keir@426 172 } else {
keir@426 173 VPRINTK("%s: got buffers back %d %d\n", __FUNCTION__,
keir@426 174 offset, msg->u.mapbufs.pages);
keir@426 175 }
keir@426 176 }
keir@426 177
keir@426 178 return rc;
keir@426 179 }
keir@426 180
keir@426 181
keir@426 182 /* The largest [o] such that (1u << o) <= n. Requires n > 0. */
keir@426 183
keir@426 184 inline unsigned log2_le(unsigned long n) {
keir@426 185 unsigned order = 1;
keir@426 186 while ((1ul << order) <= n) ++order;
keir@426 187 return (order - 1);
keir@426 188 }
keir@426 189
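/*
 * Request mapping of buffer pages from dom0, in power-of-two batches
 * of at most NET_ACCEL_MSG_MAX_PAGE_REQ pages, until all of max_pages
 * have been requested or the message queue fills up.
 */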
keir@426 190 static int vnic_send_buffer_requests(netfront_accel_vnic *vnic,
keir@426 191 struct netfront_accel_bufpages *bufpages)
keir@426 192 {
keir@426 193 int pages, offset, rc = 0, sent = 0;
keir@426 194 struct net_accel_msg msg;
keir@426 195
keir@426 196 while (bufpages->page_reqs < bufpages->max_pages) {
keir@426 197 offset = bufpages->page_reqs;
keir@426 198
keir@426 199 pages = pow2(log2_le(bufpages->max_pages -
keir@426 200 bufpages->page_reqs));
keir@426 201 pages = pages < NET_ACCEL_MSG_MAX_PAGE_REQ ?
keir@426 202 pages : NET_ACCEL_MSG_MAX_PAGE_REQ;
keir@426 203
keir@426 204 BUG_ON(offset < 0);
keir@426 205 BUG_ON(pages <= 0);
keir@426 206
keir@426 207 rc = netfront_accel_buf_map_request(vnic->dev, bufpages,
keir@426 208 &msg, pages, offset);
keir@426 209 if (rc == 0) {
keir@426 210 rc = net_accel_msg_send(vnic->shared_page,
keir@426 211 &vnic->to_dom0, &msg);
keir@426 212 if (rc < 0) {
keir@426 213 VPRINTK("%s: queue full, stopping for now\n",
keir@426 214 __FUNCTION__);
keir@426 215 break;
keir@426 216 }
keir@426 217 sent++;
keir@426 218 } else {
keir@426 219 EPRINTK("%s: problem with grant, stopping for now\n",
keir@426 220 __FUNCTION__);
keir@426 221 break;
keir@426 222 }
keir@426 223
keir@426 224 bufpages->page_reqs += pages;
keir@426 225 }
keir@426 226
keir@426 227 if (sent)
keir@426 228 net_accel_msg_notify(vnic->msg_channel_irq);
keir@426 229
keir@426 230 return rc;
keir@426 231 }
keir@426 232
keir@426 233
keir@426 234 /*
keir@426 235 * In response to dom0 saying "my queue is full", we reply with this
keir@426 236 * when it is no longer full
keir@426 237 */
keir@426 238 inline void vnic_set_queue_not_full(netfront_accel_vnic *vnic)
keir@426 239 {
keir@426 240
keir@426 241 if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL_B,
keir@426 242 (unsigned long *)&vnic->shared_page->aflags))
keir@426 243 notify_remote_via_irq(vnic->msg_channel_irq);
keir@426 244 else
keir@426 245 VPRINTK("queue not full bit already set, not signalling\n");
keir@426 246 }
keir@426 247
keir@426 248 /*
keir@426 249 * Notify dom0 that the queue we want to use is full, it should
keir@426 250 * respond by setting MSG_AFLAGS_QUEUEUNOTFULL in due course
keir@426 251 */
keir@426 252 inline void vnic_set_queue_full(netfront_accel_vnic *vnic)
keir@426 253 {
keir@426 254
keir@426 255 if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUFULL_B,
keir@426 256 (unsigned long *)&vnic->shared_page->aflags))
keir@426 257 notify_remote_via_irq(vnic->msg_channel_irq);
keir@426 258 else
keir@426 259 VPRINTK("queue full bit already set, not signalling\n");
keir@426 260 }
keir@426 261
keir@426 262
keir@426 263 static int vnic_check_hello_version(unsigned version)
keir@426 264 {
keir@426 265 if (version > NET_ACCEL_MSG_VERSION) {
keir@426 266 /* Newer protocol, we must refuse */
keir@426 267 return -EPROTO;
keir@426 268 }
keir@426 269
keir@426 270 if (version < NET_ACCEL_MSG_VERSION) {
keir@426 271 /*
keir@426 272 * We are newer, so have discretion to accept if we
keir@426 273 * wish. For now however, just reject
keir@426 274 */
keir@426 275 return -EPROTO;
keir@426 276 }
keir@426 277
keir@426 278 BUG_ON(version != NET_ACCEL_MSG_VERSION);
keir@426 279 return 0;
keir@426 280 }
keir@426 281
keir@426 282
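/*
 * Handle the backend's HELLO: check the protocol version, agree the
 * number of buffer pages to use, allocate the local buffer bookkeeping
 * and send the reply (flagging any error back to dom0).
 */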
keir@426 283 static int vnic_process_hello_msg(netfront_accel_vnic *vnic,
keir@426 284 struct net_accel_msg *msg)
keir@426 285 {
keir@426 286 int err = 0;
keir@433 287 unsigned pages = sfc_netfront_max_pages;
keir@426 288
keir@426 289 if (vnic_check_hello_version(msg->u.hello.version) < 0) {
keir@426 290 msg->id = NET_ACCEL_MSG_HELLO | NET_ACCEL_MSG_REPLY
keir@426 291 | NET_ACCEL_MSG_ERROR;
keir@426 292 msg->u.hello.version = NET_ACCEL_MSG_VERSION;
keir@426 293 } else {
keir@426 294 vnic->backend_netdev_up
keir@426 295 = vnic->shared_page->net_dev_up;
keir@426 296
keir@426 297 msg->id = NET_ACCEL_MSG_HELLO | NET_ACCEL_MSG_REPLY;
keir@426 298 msg->u.hello.version = NET_ACCEL_MSG_VERSION;
keir@426 299 if (msg->u.hello.max_pages &&
keir@426 300 msg->u.hello.max_pages < pages)
keir@426 301 pages = msg->u.hello.max_pages;
keir@426 302 msg->u.hello.max_pages = pages;
keir@426 303
keir@426 304 /* Half of pages for rx, half for tx */
keir@426 305 err = netfront_accel_alloc_buffer_mem(&vnic->bufpages,
keir@426 306 vnic->rx_bufs,
keir@426 307 vnic->tx_bufs,
keir@426 308 pages);
keir@426 309 if (err)
keir@426 310 msg->id |= NET_ACCEL_MSG_ERROR;
keir@426 311 }
keir@426 312
keir@426 313 /* Send reply */
keir@426 314 net_accel_msg_reply_notify(vnic->shared_page, vnic->msg_channel_irq,
keir@426 315 &vnic->to_dom0, msg);
keir@426 316 return err;
keir@426 317 }
keir@426 318
keir@426 319
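/*
 * dom0 reports a MAC address has moved and may now be local; drop any
 * fast-path table entry we hold for it (harmless if none exists).
 */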
keir@426 320 static int vnic_process_localmac_msg(netfront_accel_vnic *vnic,
keir@426 321 struct net_accel_msg *msg)
keir@426 322 {
keir@426 323 unsigned long flags;
keir@426 324 cuckoo_hash_mac_key key;
keir@426 325
keir@426 326 if (msg->u.localmac.flags & NET_ACCEL_MSG_ADD) {
keir@426 327 DPRINTK("MAC has moved, could be local: " MAC_FMT "\n",
keir@426 328 MAC_ARG(msg->u.localmac.mac));
keir@426 329 key = cuckoo_mac_to_key(msg->u.localmac.mac);
keir@426 330 spin_lock_irqsave(&vnic->table_lock, flags);
keir@426 331 /* Try to remove it, not a big deal if not there */
keir@426 332 cuckoo_hash_remove(&vnic->fastpath_table,
keir@426 333 (cuckoo_hash_key *)&key);
keir@426 334 spin_unlock_irqrestore(&vnic->table_lock, flags);
keir@426 335 }
keir@426 336
keir@426 337 return 0;
keir@426 338 }
keir@426 339
keir@426 340
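/*
 * Dispatch a single message received from dom0 according to its id,
 * enforcing the expected HELLO -> SETHW -> MAPBUF ordering of the
 * setup handshake.
 */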
keir@426 341 static
keir@426 342 int vnic_process_rx_msg(netfront_accel_vnic *vnic,
keir@426 343 struct net_accel_msg *msg)
keir@426 344 {
keir@426 345 int err;
keir@426 346
keir@426 347 switch (msg->id) {
keir@426 348 case NET_ACCEL_MSG_HELLO:
keir@426 349 /* Hello, reply with Reply */
keir@426 350 DPRINTK("got Hello, with version %.8x\n",
keir@426 351 msg->u.hello.version);
keir@426 352 BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_NONE);
keir@426 353 err = vnic_process_hello_msg(vnic, msg);
keir@426 354 if (err == 0)
keir@426 355 vnic->msg_state = NETFRONT_ACCEL_MSG_HELLO;
keir@426 356 break;
keir@426 357 case NET_ACCEL_MSG_SETHW:
keir@426 358 /* Hardware info message */
keir@426 359 DPRINTK("got H/W info\n");
keir@426 360 BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HELLO);
keir@426 361 err = netfront_accel_vi_init(vnic, &msg->u.hw);
keir@426 362 if (err == 0)
keir@426 363 vnic->msg_state = NETFRONT_ACCEL_MSG_HW;
keir@426 364 break;
keir@426 365 case NET_ACCEL_MSG_MAPBUF | NET_ACCEL_MSG_REPLY:
keir@426 366 VPRINTK("Got mapped buffers back\n");
keir@426 367 BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HW);
keir@426 368 err = vnic_add_bufs(vnic, msg);
keir@426 369 break;
keir@426 370 case NET_ACCEL_MSG_MAPBUF | NET_ACCEL_MSG_REPLY | NET_ACCEL_MSG_ERROR:
keir@426 371 /* No buffers. Can't use the fast path. */
keir@426 372 EPRINTK("Got mapped buffers error. Cannot accelerate.\n");
keir@426 373 BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HW);
keir@426 374 err = -EIO;
keir@426 375 break;
keir@426 376 case NET_ACCEL_MSG_LOCALMAC:
keir@426 377 /* Should be add, remove not currently used */
keir@426 378 EPRINTK_ON(!(msg->u.localmac.flags & NET_ACCEL_MSG_ADD));
keir@426 379 BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HW);
keir@426 380 err = vnic_process_localmac_msg(vnic, msg);
keir@426 381 break;
keir@426 382 default:
keir@426 383 EPRINTK("Huh? Message code is 0x%x\n", msg->id);
keir@426 384 err = -EPROTO;
keir@426 385 break;
keir@426 386 }
keir@426 387
keir@426 388 return err;
keir@426 389 }
keir@426 390
keir@426 391
keir@426 392 /* Process an IRQ received from back end driver */
keir@426 393 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
keir@426 394 void netfront_accel_msg_from_bend(struct work_struct *context)
keir@426 395 #else
keir@426 396 void netfront_accel_msg_from_bend(void *context)
keir@426 397 #endif
keir@426 398 {
keir@426 399 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
keir@426 400 netfront_accel_vnic *vnic =
keir@426 401 container_of(context, netfront_accel_vnic, msg_from_bend);
keir@426 402 #else
keir@426 403 netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
keir@426 404 #endif
keir@426 405 struct net_accel_msg msg;
keir@426 406 int err, queue_was_full = 0;
keir@426 407
keir@426 408 mutex_lock(&vnic->vnic_mutex);
keir@426 409
keir@426 410 /*
keir@426 411 * This happens when the shared pages have been unmapped but
keir@426 412 * the workqueue has yet to be flushed
keir@426 413 */
keir@426 414 if (!vnic->dom0_state_is_setup)
keir@426 415 goto unlock_out;
keir@426 416
keir@426 417 while ((vnic->shared_page->aflags & NET_ACCEL_MSG_AFLAGS_TO_DOMU_MASK)
keir@426 418 != 0) {
keir@426 419 if (vnic->shared_page->aflags &
keir@426 420 NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL) {
keir@426 421 /* We've been told there may now be space. */
keir@426 422 clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL_B,
keir@426 423 (unsigned long *)&vnic->shared_page->aflags);
keir@426 424 }
keir@426 425
keir@426 426 if (vnic->shared_page->aflags &
keir@426 427 NET_ACCEL_MSG_AFLAGS_QUEUE0FULL) {
keir@426 428 /*
keir@426 429 * There will be space at the end of this
keir@426 430 * function if we can make any.
keir@426 431 */
keir@426 432 clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0FULL_B,
keir@426 433 (unsigned long *)&vnic->shared_page->aflags);
keir@426 434 queue_was_full = 1;
keir@426 435 }
keir@426 436
keir@426 437 if (vnic->shared_page->aflags &
keir@426 438 NET_ACCEL_MSG_AFLAGS_NETUPDOWN) {
keir@426 439 DPRINTK("%s: net interface change\n", __FUNCTION__);
keir@426 440 clear_bit(NET_ACCEL_MSG_AFLAGS_NETUPDOWN_B,
keir@426 441 (unsigned long *)&vnic->shared_page->aflags);
keir@426 442 if (vnic->shared_page->net_dev_up)
keir@426 443 netfront_accel_interface_up(vnic);
keir@426 444 else
keir@426 445 netfront_accel_interface_down(vnic);
keir@426 446 }
keir@426 447 }
keir@426 448
keir@426 449 /* Pull msg out of shared memory */
keir@426 450 while ((err = net_accel_msg_recv(vnic->shared_page, &vnic->from_dom0,
keir@426 451 &msg)) == 0) {
keir@426 452 err = vnic_process_rx_msg(vnic, &msg);
keir@426 453
keir@426 454 if (err != 0)
keir@426 455 goto done;
keir@426 456 }
keir@426 457
keir@426 458 /*
keir@426 459 * Send any pending buffer map request messages that we can,
keir@426 460 * and mark domU->dom0 as full if necessary.
keir@426 461 */
keir@426 462 if (vnic->msg_state == NETFRONT_ACCEL_MSG_HW &&
keir@426 463 vnic->bufpages.page_reqs < vnic->bufpages.max_pages) {
keir@426 464 if (vnic_send_buffer_requests(vnic, &vnic->bufpages) == -ENOSPC)
keir@426 465 vnic_set_queue_full(vnic);
keir@426 466 }
keir@426 467
keir@426 468 /*
keir@426 469 * If there are no messages then this is not an error. It
keir@426 470 * just means that we've finished processing the queue.
keir@426 471 */
keir@426 472 if (err == -ENOENT)
keir@426 473 err = 0;
keir@426 474 done:
keir@426 475 /* We will now have made space in the dom0->domU queue if we can */
keir@426 476 if (queue_was_full)
keir@426 477 vnic_set_queue_not_full(vnic);
keir@426 478
keir@426 479 if (err != 0) {
keir@426 480 EPRINTK("%s returned %d\n", __FUNCTION__, err);
keir@426 481 netfront_accel_set_closing(vnic);
keir@426 482 }
keir@426 483
keir@426 484 unlock_out:
keir@426 485 mutex_unlock(&vnic->vnic_mutex);
keir@426 486
keir@426 487 return;
keir@426 488 }
keir@426 489
keir@426 490
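/*
 * Message channel interrupt from the backend: defer all processing to
 * the workqueue, since message handling needs to sleep (it takes the
 * vnic mutex).
 */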
keir@426 491 irqreturn_t netfront_accel_msg_channel_irq_from_bend(int irq, void *context,
keir@426 492 struct pt_regs *unused)
keir@426 493 {
keir@426 494 netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
keir@426 495 VPRINTK("irq %d from device %s\n", irq, vnic->dev->nodename);
keir@426 496
keir@426 497 queue_work(netfront_accel_workqueue, &vnic->msg_from_bend);
keir@426 498
keir@426 499 return IRQ_HANDLED;
keir@426 500 }
keir@426 501
keir@426 502 /* Process an interrupt received from the NIC via backend */
keir@426 503 irqreturn_t netfront_accel_net_channel_irq_from_bend(int irq, void *context,
keir@426 504 struct pt_regs *unused)
keir@426 505 {
keir@426 506 netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
keir@426 507 struct net_device *net_dev = vnic->net_dev;
keir@426 508 unsigned long flags;
keir@426 509
keir@426 510 VPRINTK("net irq %d from device %s\n", irq, vnic->dev->nodename);
keir@426 511
keir@426 512 NETFRONT_ACCEL_STATS_OP(vnic->stats.irq_count++);
keir@426 513
keir@426 514 BUG_ON(net_dev==NULL);
keir@426 515
keir@426 516 spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
keir@426 517 if (vnic->irq_enabled) {
keir@426 518 netfront_accel_disable_net_interrupts(vnic);
keir@426 519 vnic->irq_enabled = 0;
keir@426 520 spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
keir@426 521
keir@426 522 #if NETFRONT_ACCEL_STATS
keir@426 523 vnic->stats.poll_schedule_count++;
keir@426 524 if (vnic->stats.event_count_since_irq >
keir@426 525 vnic->stats.events_per_irq_max)
keir@426 526 vnic->stats.events_per_irq_max =
keir@426 527 vnic->stats.event_count_since_irq;
keir@426 528 vnic->stats.event_count_since_irq = 0;
keir@426 529 #endif
keir@426 530 netif_rx_schedule(net_dev);
keir@426 531 }
keir@426 532 else {
keir@426 533 spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
keir@426 534 NETFRONT_ACCEL_STATS_OP(vnic->stats.useless_irq_count++);
keir@426 535 DPRINTK("%s: irq when disabled\n", __FUNCTION__);
keir@426 536 }
keir@426 537
keir@426 538 return IRQ_HANDLED;
keir@426 539 }
keir@426 540
keir@426 541
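/*
 * Ask dom0 to remove the fast-path entry for the given MAC/IP/port/
 * protocol, if there is room in the shared message queue (the request
 * is silently dropped if not).
 */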
keir@426 542 void netfront_accel_msg_tx_fastpath(netfront_accel_vnic *vnic, const void *mac,
keir@426 543 u32 ip, u16 port, u8 protocol)
keir@426 544 {
keir@426 545 unsigned long lock_state;
keir@426 546 struct net_accel_msg *msg;
keir@426 547
keir@426 548 msg = net_accel_msg_start_send(vnic->shared_page, &vnic->to_dom0,
keir@426 549 &lock_state);
keir@426 550
keir@426 551 if (msg == NULL)
keir@426 552 return;
keir@426 553
keir@426 554 net_accel_msg_init(msg, NET_ACCEL_MSG_FASTPATH);
keir@426 555 msg->u.fastpath.flags = NET_ACCEL_MSG_REMOVE;
keir@426 556 memcpy(msg->u.fastpath.mac, mac, ETH_ALEN);
keir@426 557
keir@426 558 msg->u.fastpath.port = port;
keir@426 559 msg->u.fastpath.ip = ip;
keir@426 560 msg->u.fastpath.proto = protocol;
keir@426 561
keir@426 562 net_accel_msg_complete_send_notify(vnic->shared_page, &vnic->to_dom0,
keir@426 563 &lock_state, vnic->msg_channel_irq);
keir@426 564 }