ia64/linux-2.6.18-xen.hg
drivers/xen/sfc_netfront/accel_msg.c @ 433:8c8a097cae69

Solarflare: Various build fixes, and make SFC drivers dependent on x86
From: Kieran Mansley <kmansley@solarflare.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Author: Keir Fraser <keir.fraser@citrix.com>
Date:   Wed Feb 20 18:05:47 2008 +0000
/****************************************************************************
 * Solarflare driver for Xen network acceleration
 *
 * Copyright 2006-2008: Solarflare Communications Inc,
 *                      9501 Jeronimo Road, Suite 250,
 *                      Irvine, CA 92618, USA
 *
 * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 ****************************************************************************
 */
#include <linux/stddef.h>
#include <linux/errno.h>

#include <xen/xenbus.h>

#include "accel.h"
#include "accel_msg_iface.h"
#include "accel_util.h"
#include "accel_bufs.h"

#include "netfront.h" /* drivers/xen/netfront/netfront.h */

static void vnic_start_interrupts(netfront_accel_vnic *vnic)
{
	unsigned long flags;

	/* Prime our interrupt */
	spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
	if (!netfront_accel_vi_enable_interrupts(vnic)) {
		/* Cripes, that was quick, better pass it up */
		netfront_accel_disable_net_interrupts(vnic);
		vnic->irq_enabled = 0;
		NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_schedule_count++);
		netif_rx_schedule(vnic->net_dev);
	} else {
		/*
		 * Nothing yet, make sure we get interrupts through
		 * back end
		 */
		vnic->irq_enabled = 1;
		netfront_accel_enable_net_interrupts(vnic);
	}
	spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
}

static void vnic_stop_interrupts(netfront_accel_vnic *vnic)
{
	unsigned long flags;

	spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
	netfront_accel_disable_net_interrupts(vnic);
	vnic->irq_enabled = 0;
	spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
}

static void vnic_start_fastpath(netfront_accel_vnic *vnic)
{
	struct net_device *net_dev = vnic->net_dev;
	unsigned long flags;

	DPRINTK("%s\n", __FUNCTION__);

	spin_lock_irqsave(&vnic->tx_lock, flags);
	vnic->tx_enabled = 1;
	spin_unlock_irqrestore(&vnic->tx_lock, flags);

	netif_poll_disable(net_dev);
	vnic->poll_enabled = 1;
	netif_poll_enable(net_dev);

	vnic_start_interrupts(vnic);
}

void vnic_stop_fastpath(netfront_accel_vnic *vnic)
{
	struct net_device *net_dev = vnic->net_dev;
	struct netfront_info *np = (struct netfront_info *)netdev_priv(net_dev);
	unsigned long flags1, flags2;

	DPRINTK("%s\n", __FUNCTION__);

	vnic_stop_interrupts(vnic);

	spin_lock_irqsave(&vnic->tx_lock, flags1);
	vnic->tx_enabled = 0;
	spin_lock_irqsave(&np->tx_lock, flags2);
	if (vnic->tx_skb != NULL) {
		dev_kfree_skb_any(vnic->tx_skb);
		vnic->tx_skb = NULL;
		if (netfront_check_queue_ready(net_dev)) {
			netif_wake_queue(net_dev);
			NETFRONT_ACCEL_STATS_OP
				(vnic->stats.queue_wakes++);
		}
	}
	spin_unlock_irqrestore(&np->tx_lock, flags2);
	spin_unlock_irqrestore(&vnic->tx_lock, flags1);

	/* Must prevent polls and hold lock to modify poll_enabled */
	netif_poll_disable(net_dev);
	spin_lock_irqsave(&vnic->irq_enabled_lock, flags1);
	vnic->poll_enabled = 0;
	spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags1);
	netif_poll_enable(net_dev);
}

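/*
 * The fast path runs only while both backend_netdev_up (mirroring the
 * backend's net_dev_up flag) and frontend_ready (set in vnic_add_bufs()
 * once every mapped buffer page has been returned) are true.  Whichever
 * condition becomes true last calls vnic_start_fastpath(); the
 * interface_down handler below stops it again.
 */
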
static void netfront_accel_interface_up(netfront_accel_vnic *vnic)
{
	if (!vnic->backend_netdev_up) {
		vnic->backend_netdev_up = 1;

		if (vnic->frontend_ready)
			vnic_start_fastpath(vnic);
	}
}


static void netfront_accel_interface_down(netfront_accel_vnic *vnic)
{
	if (vnic->backend_netdev_up) {
		vnic->backend_netdev_up = 0;

		if (vnic->frontend_ready)
			vnic_stop_fastpath(vnic);
	}
}

static int vnic_add_bufs(netfront_accel_vnic *vnic,
			 struct net_accel_msg *msg)
{
	int rc, offset;
	struct netfront_accel_bufinfo *bufinfo;

	BUG_ON(msg->u.mapbufs.pages > NET_ACCEL_MSG_MAX_PAGE_REQ);

	offset = msg->u.mapbufs.reqid;

	if (offset < vnic->bufpages.max_pages -
	    (vnic->bufpages.max_pages / sfc_netfront_buffer_split)) {
		bufinfo = vnic->rx_bufs;
	} else
		bufinfo = vnic->tx_bufs;

	/* Queue up some Rx buffers to start things off. */
	if ((rc = netfront_accel_add_bufs(&vnic->bufpages, bufinfo, msg)) == 0) {
		netfront_accel_vi_add_bufs(vnic, bufinfo == vnic->rx_bufs);

		if (offset + msg->u.mapbufs.pages == vnic->bufpages.max_pages) {
			VPRINTK("%s: got all buffers back\n", __FUNCTION__);
			vnic->frontend_ready = 1;
			if (vnic->backend_netdev_up)
				vnic_start_fastpath(vnic);
		} else {
			VPRINTK("%s: got buffers back %d %d\n", __FUNCTION__,
				offset, msg->u.mapbufs.pages);
		}
	}

	return rc;
}

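/*
 * Note on the rx/tx split above: buffer pages are indexed by reqid.
 * The first max_pages - max_pages/sfc_netfront_buffer_split pages
 * belong to rx, the remainder to tx.  For example, assuming the
 * sfc_netfront_buffer_split module parameter is at its default of 2,
 * max_pages == 384 puts reqids 0-191 in rx_bufs and 192-383 in tx_bufs.
 */
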
/* The largest [o] such that (1u << o) <= n.  Requires n > 0. */

inline unsigned log2_le(unsigned long n) {
	unsigned order = 1;
	while ((1ul << order) <= n) ++order;
	return (order - 1);
}

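/*
 * Worked example, assuming pow2(o) is the usual (1u << o) helper from
 * accel_util.h: log2_le(13) == 3, so pow2(log2_le(13)) == 8, the
 * largest power of two <= 13.  vnic_send_buffer_requests() below
 * applies this greedily (capped at NET_ACCEL_MSG_MAX_PAGE_REQ), so a
 * remainder of 13 pages is requested as chunks of 8, then 4, then 1.
 */
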
static int vnic_send_buffer_requests(netfront_accel_vnic *vnic,
				     struct netfront_accel_bufpages *bufpages)
{
	int pages, offset, rc = 0, sent = 0;
	struct net_accel_msg msg;

	while (bufpages->page_reqs < bufpages->max_pages) {
		offset = bufpages->page_reqs;

		pages = pow2(log2_le(bufpages->max_pages -
				     bufpages->page_reqs));
		pages = pages < NET_ACCEL_MSG_MAX_PAGE_REQ ?
			pages : NET_ACCEL_MSG_MAX_PAGE_REQ;

		BUG_ON(offset < 0);
		BUG_ON(pages <= 0);

		rc = netfront_accel_buf_map_request(vnic->dev, bufpages,
						    &msg, pages, offset);
		if (rc == 0) {
			rc = net_accel_msg_send(vnic->shared_page,
						&vnic->to_dom0, &msg);
			if (rc < 0) {
				VPRINTK("%s: queue full, stopping for now\n",
					__FUNCTION__);
				break;
			}
			sent++;
		} else {
			EPRINTK("%s: problem with grant, stopping for now\n",
				__FUNCTION__);
			break;
		}

		bufpages->page_reqs += pages;
	}

	if (sent)
		net_accel_msg_notify(vnic->msg_channel_irq);

	return rc;
}

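/*
 * Queue-full handshake via the shared-page aflags word: the frontend
 * sets QUEUEUFULL when its to-dom0 queue fills, and dom0 replies with
 * QUEUEUNOTFULL once it has drained it (handled in
 * netfront_accel_msg_from_bend() below).  test_and_set_bit() means the
 * event channel is only kicked on a flag's 0 -> 1 transition; repeat
 * calls while the bit is still set stay quiet.
 */
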
/*
 * In response to dom0 saying "my queue is full", we reply with this
 * when it is no longer full
 */
inline void vnic_set_queue_not_full(netfront_accel_vnic *vnic)
{
	if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL_B,
			      (unsigned long *)&vnic->shared_page->aflags))
		notify_remote_via_irq(vnic->msg_channel_irq);
	else
		VPRINTK("queue not full bit already set, not signalling\n");
}

/*
 * Notify dom0 that the queue we want to use is full, it should
 * respond by setting MSG_AFLAGS_QUEUEUNOTFULL in due course
 */
inline void vnic_set_queue_full(netfront_accel_vnic *vnic)
{
	if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUFULL_B,
			      (unsigned long *)&vnic->shared_page->aflags))
		notify_remote_via_irq(vnic->msg_channel_irq);
	else
		VPRINTK("queue full bit already set, not signalling\n");
}

static int vnic_check_hello_version(unsigned version)
{
	if (version > NET_ACCEL_MSG_VERSION) {
		/* Newer protocol, we must refuse */
		return -EPROTO;
	}

	if (version < NET_ACCEL_MSG_VERSION) {
		/*
		 * We are newer, so have discretion to accept if we
		 * wish.  For now however, just reject
		 */
		return -EPROTO;
	}

	BUG_ON(version != NET_ACCEL_MSG_VERSION);
	return 0;
}

static int vnic_process_hello_msg(netfront_accel_vnic *vnic,
				  struct net_accel_msg *msg)
{
	int err = 0;
	unsigned pages = sfc_netfront_max_pages;

	if (vnic_check_hello_version(msg->u.hello.version) < 0) {
		msg->id = NET_ACCEL_MSG_HELLO | NET_ACCEL_MSG_REPLY
			| NET_ACCEL_MSG_ERROR;
		msg->u.hello.version = NET_ACCEL_MSG_VERSION;
	} else {
		vnic->backend_netdev_up
			= vnic->shared_page->net_dev_up;

		msg->id = NET_ACCEL_MSG_HELLO | NET_ACCEL_MSG_REPLY;
		msg->u.hello.version = NET_ACCEL_MSG_VERSION;
		if (msg->u.hello.max_pages &&
		    msg->u.hello.max_pages < pages)
			pages = msg->u.hello.max_pages;
		msg->u.hello.max_pages = pages;

		/* Half of pages for rx, half for tx */
		err = netfront_accel_alloc_buffer_mem(&vnic->bufpages,
						      vnic->rx_bufs,
						      vnic->tx_bufs,
						      pages);
		if (err)
			msg->id |= NET_ACCEL_MSG_ERROR;
	}

	/* Send reply */
	net_accel_msg_reply_notify(vnic->shared_page, vnic->msg_channel_irq,
				   &vnic->to_dom0, msg);
	return err;
}

static int vnic_process_localmac_msg(netfront_accel_vnic *vnic,
				     struct net_accel_msg *msg)
{
	unsigned long flags;
	cuckoo_hash_mac_key key;

	if (msg->u.localmac.flags & NET_ACCEL_MSG_ADD) {
		DPRINTK("MAC has moved, could be local: " MAC_FMT "\n",
			MAC_ARG(msg->u.localmac.mac));
		key = cuckoo_mac_to_key(msg->u.localmac.mac);
		spin_lock_irqsave(&vnic->table_lock, flags);
		/* Try to remove it, not a big deal if not there */
		cuckoo_hash_remove(&vnic->fastpath_table,
				   (cuckoo_hash_key *)&key);
		spin_unlock_irqrestore(&vnic->table_lock, flags);
	}

	return 0;
}

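/*
 * Message state machine, enforced by the BUG_ON()s below:
 * NETFRONT_ACCEL_MSG_NONE -> (hello reply sent) -> NETFRONT_ACCEL_MSG_HELLO
 * -> (hardware info accepted) -> NETFRONT_ACCEL_MSG_HW.
 * Mapped-buffer replies and localmac messages are only valid in the
 * final NETFRONT_ACCEL_MSG_HW state.
 */
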
static
int vnic_process_rx_msg(netfront_accel_vnic *vnic,
			struct net_accel_msg *msg)
{
	int err;

	switch (msg->id) {
	case NET_ACCEL_MSG_HELLO:
		/* Hello, reply with Reply */
		DPRINTK("got Hello, with version %.8x\n",
			msg->u.hello.version);
		BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_NONE);
		err = vnic_process_hello_msg(vnic, msg);
		if (err == 0)
			vnic->msg_state = NETFRONT_ACCEL_MSG_HELLO;
		break;
	case NET_ACCEL_MSG_SETHW:
		/* Hardware info message */
		DPRINTK("got H/W info\n");
		BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HELLO);
		err = netfront_accel_vi_init(vnic, &msg->u.hw);
		if (err == 0)
			vnic->msg_state = NETFRONT_ACCEL_MSG_HW;
		break;
	case NET_ACCEL_MSG_MAPBUF | NET_ACCEL_MSG_REPLY:
		VPRINTK("Got mapped buffers back\n");
		BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HW);
		err = vnic_add_bufs(vnic, msg);
		break;
	case NET_ACCEL_MSG_MAPBUF | NET_ACCEL_MSG_REPLY | NET_ACCEL_MSG_ERROR:
		/* No buffers.  Can't use the fast path. */
		EPRINTK("Got mapped buffers error.  Cannot accelerate.\n");
		BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HW);
		err = -EIO;
		break;
	case NET_ACCEL_MSG_LOCALMAC:
		/* Should be add, remove not currently used */
		EPRINTK_ON(!(msg->u.localmac.flags & NET_ACCEL_MSG_ADD));
		BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HW);
		err = vnic_process_localmac_msg(vnic, msg);
		break;
	default:
		EPRINTK("Huh?  Message code is 0x%x\n", msg->id);
		err = -EPROTO;
		break;
	}

	return err;
}

/* Process an IRQ received from back end driver */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
void netfront_accel_msg_from_bend(struct work_struct *context)
#else
void netfront_accel_msg_from_bend(void *context)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
	netfront_accel_vnic *vnic =
		container_of(context, netfront_accel_vnic, msg_from_bend);
#else
	netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
#endif
	struct net_accel_msg msg;
	int err, queue_was_full = 0;

	mutex_lock(&vnic->vnic_mutex);

	/*
	 * This happens when the shared pages have been unmapped but
	 * the workqueue has yet to be flushed
	 */
	if (!vnic->dom0_state_is_setup)
		goto unlock_out;

	while ((vnic->shared_page->aflags & NET_ACCEL_MSG_AFLAGS_TO_DOMU_MASK)
	       != 0) {
		if (vnic->shared_page->aflags &
		    NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL) {
			/* We've been told there may now be space. */
			clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL_B,
				  (unsigned long *)&vnic->shared_page->aflags);
		}

		if (vnic->shared_page->aflags &
		    NET_ACCEL_MSG_AFLAGS_QUEUE0FULL) {
			/*
			 * There will be space at the end of this
			 * function if we can make any.
			 */
			clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0FULL_B,
				  (unsigned long *)&vnic->shared_page->aflags);
			queue_was_full = 1;
		}

		if (vnic->shared_page->aflags &
		    NET_ACCEL_MSG_AFLAGS_NETUPDOWN) {
			DPRINTK("%s: net interface change\n", __FUNCTION__);
			clear_bit(NET_ACCEL_MSG_AFLAGS_NETUPDOWN_B,
				  (unsigned long *)&vnic->shared_page->aflags);
			if (vnic->shared_page->net_dev_up)
				netfront_accel_interface_up(vnic);
			else
				netfront_accel_interface_down(vnic);
		}
	}

	/* Pull msg out of shared memory */
	while ((err = net_accel_msg_recv(vnic->shared_page, &vnic->from_dom0,
					 &msg)) == 0) {
		err = vnic_process_rx_msg(vnic, &msg);

		if (err != 0)
			goto done;
	}

	/*
	 * Send any pending buffer map request messages that we can,
	 * and mark domU->dom0 as full if necessary.
	 */
	if (vnic->msg_state == NETFRONT_ACCEL_MSG_HW &&
	    vnic->bufpages.page_reqs < vnic->bufpages.max_pages) {
		if (vnic_send_buffer_requests(vnic, &vnic->bufpages) == -ENOSPC)
			vnic_set_queue_full(vnic);
	}

	/*
	 * If there are no messages then this is not an error.  It
	 * just means that we've finished processing the queue.
	 */
	if (err == -ENOENT)
		err = 0;
 done:
	/* We will now have made space in the dom0->domU queue if we can */
	if (queue_was_full)
		vnic_set_queue_not_full(vnic);

	if (err != 0) {
		EPRINTK("%s returned %d\n", __FUNCTION__, err);
		netfront_accel_set_closing(vnic);
	}

 unlock_out:
	mutex_unlock(&vnic->vnic_mutex);

	return;
}

irqreturn_t netfront_accel_msg_channel_irq_from_bend(int irq, void *context,
						     struct pt_regs *unused)
{
	netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
	VPRINTK("irq %d from device %s\n", irq, vnic->dev->nodename);

	queue_work(netfront_accel_workqueue, &vnic->msg_from_bend);

	return IRQ_HANDLED;
}

/* Process an interrupt received from the NIC via backend */
irqreturn_t netfront_accel_net_channel_irq_from_bend(int irq, void *context,
						     struct pt_regs *unused)
{
	netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
	struct net_device *net_dev = vnic->net_dev;
	unsigned long flags;

	VPRINTK("net irq %d from device %s\n", irq, vnic->dev->nodename);

	NETFRONT_ACCEL_STATS_OP(vnic->stats.irq_count++);

	BUG_ON(net_dev == NULL);

	spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
	if (vnic->irq_enabled) {
		netfront_accel_disable_net_interrupts(vnic);
		vnic->irq_enabled = 0;
		spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);

#if NETFRONT_ACCEL_STATS
		vnic->stats.poll_schedule_count++;
		if (vnic->stats.event_count_since_irq >
		    vnic->stats.events_per_irq_max)
			vnic->stats.events_per_irq_max =
				vnic->stats.event_count_since_irq;
		vnic->stats.event_count_since_irq = 0;
#endif
		netif_rx_schedule(net_dev);
	}
	else {
		spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
		NETFRONT_ACCEL_STATS_OP(vnic->stats.useless_irq_count++);
		DPRINTK("%s: irq when disabled\n", __FUNCTION__);
	}

	return IRQ_HANDLED;
}

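/*
 * Ask the backend to drop its fastpath entry for the given
 * MAC/IP/port/protocol tuple (the only flag sent here is
 * NET_ACCEL_MSG_REMOVE).  If no slot is free in the shared queue,
 * the request is simply dropped.
 */
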
void netfront_accel_msg_tx_fastpath(netfront_accel_vnic *vnic, const void *mac,
				    u32 ip, u16 port, u8 protocol)
{
	unsigned long lock_state;
	struct net_accel_msg *msg;

	msg = net_accel_msg_start_send(vnic->shared_page, &vnic->to_dom0,
				       &lock_state);

	if (msg == NULL)
		return;

	net_accel_msg_init(msg, NET_ACCEL_MSG_FASTPATH);
	msg->u.fastpath.flags = NET_ACCEL_MSG_REMOVE;
	memcpy(msg->u.fastpath.mac, mac, ETH_ALEN);

	msg->u.fastpath.port = port;
	msg->u.fastpath.ip = ip;
	msg->u.fastpath.proto = protocol;

	net_accel_msg_complete_send_notify(vnic->shared_page, &vnic->to_dom0,
					   &lock_state, vnic->msg_channel_irq);
}