extras/mini-os/netfront.c @ 16903:1fe8c8a275c9

/* Minimal network driver for Mini-OS.
 * Copyright (c) 2006-2007 Jacob Gorm Hansen, University of Copenhagen.
 * Based on netfront.c from Xen Linux.
 *
 * Does not handle fragments or extras.
 */

#include <os.h>
#include <xenbus.h>
#include <events.h>
#include <errno.h>
#include <xen/io/netif.h>
#include <gnttab.h>
#include <xmalloc.h>
#include <time.h>
#include <netfront.h>
#include <lib.h>
#include <semaphore.h>

DECLARE_WAIT_QUEUE_HEAD(netfront_queue);

#define NETIF_SELECT_RX ((void*)-1)

#define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)
#define GRANT_INVALID_REF 0

struct net_buffer {
    void* page;
    grant_ref_t gref;
};

struct netfront_dev {
    /* One extra slot: index 0 is the freelist head, ids live at i+1. */
    unsigned short tx_freelist[NET_TX_RING_SIZE + 1];
    struct semaphore tx_sem;

    struct net_buffer rx_buffers[NET_RX_RING_SIZE];
    struct net_buffer tx_buffers[NET_TX_RING_SIZE];

    struct netif_tx_front_ring tx;
    struct netif_rx_front_ring rx;
    grant_ref_t tx_ring_ref;
    grant_ref_t rx_ring_ref;
    evtchn_port_t evtchn, local_port;

    char *nodename;
    char *backend;

    void (*netif_rx)(unsigned char* data, int len);
};

void init_rx_buffers(struct netfront_dev *dev);
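
/* The tx freelist is threaded through the array itself: slot 0 holds
 * the index of the first free id, and the link for id 'i' lives in
 * slot i+1 so that id 0 never aliases the list head. */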
static inline void add_id_to_freelist(unsigned int id,unsigned short* freelist)
{
    freelist[id + 1] = freelist[0];
    freelist[0] = id;
}

static inline unsigned short get_id_from_freelist(unsigned short* freelist)
{
    unsigned int id = freelist[0];
    freelist[0] = freelist[id + 1];
    return id;
}

__attribute__((weak)) void netif_rx(unsigned char* data,int len)
{
    printk("%d bytes incoming at %p\n",len,data);
}

__attribute__((weak)) void net_app_main(void*si,unsigned char*mac) {}

static inline int xennet_rxidx(RING_IDX idx)
{
    return idx & (NET_RX_RING_SIZE - 1);
}
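
/* Drain the rx ring: for each response, revoke the grant on the
 * buffer and hand the payload to the registered callback, then
 * re-grant the consumed buffers and queue fresh requests so the
 * backend always has pages to copy into. */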
void network_rx(struct netfront_dev *dev)
{
    RING_IDX rp,cons;
    struct netif_rx_response *rx;

moretodo:
    rp = dev->rx.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */
    cons = dev->rx.rsp_cons;

    int nr_consumed=0;
    while (cons != rp)
    {
        struct net_buffer* buf;
        unsigned char* page;

        rx = RING_GET_RESPONSE(&dev->rx, cons);

        if (rx->flags & NETRXF_extra_info)
        {
            printk("+++++++++++++++++++++ we have extras!\n");
            ++cons;
            continue;
        }

        if (rx->status == NETIF_RSP_NULL)
        {
            ++cons;
            continue;
        }

        int id = rx->id;

        buf = &dev->rx_buffers[id];
        page = (unsigned char*)buf->page;
        gnttab_end_access(buf->gref);

        if(rx->status>0)
        {
            dev->netif_rx(page+rx->offset,rx->status);
        }

        nr_consumed++;

        ++cons;
    }
    dev->rx.rsp_cons=cons;

    int more;
    RING_FINAL_CHECK_FOR_RESPONSES(&dev->rx,more);
    if(more) goto moretodo;

    RING_IDX req_prod = dev->rx.req_prod_pvt;

    int i;
    netif_rx_request_t *req;

    for(i=0; i<nr_consumed; i++)
    {
        int id = xennet_rxidx(req_prod + i);
        req = RING_GET_REQUEST(&dev->rx, req_prod + i);
        struct net_buffer* buf = &dev->rx_buffers[id];
        void* page = buf->page;

        /* We are sure to have free gnttab entries since they got released above */
        buf->gref = req->gref =
            gnttab_grant_access(0,virt_to_mfn(page),0);

        req->id = id;
    }

    wmb();

    dev->rx.req_prod_pvt = req_prod + i;

    int notify;
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);
    if (notify)
        notify_remote_via_evtchn(dev->evtchn);
}
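
/* Reclaim completed tx requests: revoke each buffer's grant, return
 * its id to the freelist and release one slot of the tx semaphore. */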
void network_tx_buf_gc(struct netfront_dev *dev)
{
    RING_IDX cons, prod;
    unsigned short id;

    do {
        prod = dev->tx.sring->rsp_prod;
        rmb(); /* Ensure we see responses up to 'prod'. */

        for (cons = dev->tx.rsp_cons; cons != prod; cons++)
        {
            struct netif_tx_response *txrsp;

            txrsp = RING_GET_RESPONSE(&dev->tx, cons);
            if (txrsp->status == NETIF_RSP_NULL)
                continue;

            id = txrsp->id;
            struct net_buffer* buf = &dev->tx_buffers[id];
            gnttab_end_access(buf->gref);
            buf->gref=GRANT_INVALID_REF;

            add_id_to_freelist(id,dev->tx_freelist);
            up(&dev->tx_sem);
        }

        dev->tx.rsp_cons = prod;

        /*
         * Set a new event, then check for race with update of tx_cons.
         * Note that it is essential to schedule a callback, no matter
         * how few tx_buffers are pending. Even if there is space in the
         * transmit ring, higher layers may be blocked because too much
         * data is outstanding: in such cases notification from Xen is
         * likely to be the only kick that we'll get.
         */
        dev->tx.sring->rsp_event =
            prod + ((dev->tx.sring->req_prod - prod) >> 1) + 1;
        mb();
    } while ((cons == prod) && (prod != dev->tx.sring->rsp_prod));
}
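
/* Event channel handler: runs on every backend notification and
 * services both directions, tx completions first, then rx traffic. */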
void netfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
{
    int flags;
    struct netfront_dev *dev = data;

    local_irq_save(flags);

    network_tx_buf_gc(dev);
    network_rx(dev);

    local_irq_restore(flags);
}
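
/* Bring up a netfront device: allocate rx buffers and the shared
 * rings, grant them to the backend, bind an event channel, publish
 * the details via a xenbus transaction, and wait for the backend to
 * reach state 4 (XenbusStateConnected). */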
struct netfront_dev *init_netfront(char *nodename, void (*thenetif_rx)(unsigned char* data, int len), unsigned char rawmac[6])
{
    xenbus_transaction_t xbt;
    char* err;
    char* message=NULL;
    struct netif_tx_sring *txs;
    struct netif_rx_sring *rxs;
    int retry=0;
    int i;
    char* mac;
    char* msg;

    struct netfront_dev *dev;

    if (!nodename)
        nodename = "device/vif/0";

    char path[strlen(nodename) + 1 + 10 + 1];

    if (!thenetif_rx)
        thenetif_rx = netif_rx;

    printk("************************ NETFRONT for %s **********\n\n\n", nodename);

    dev = malloc(sizeof(*dev));
    dev->nodename = strdup(nodename);

    printk("net TX ring size %d\n", NET_TX_RING_SIZE);
    printk("net RX ring size %d\n", NET_RX_RING_SIZE);
    init_SEMAPHORE(&dev->tx_sem, NET_TX_RING_SIZE);
    for(i=0;i<NET_TX_RING_SIZE;i++)
    {
        add_id_to_freelist(i,dev->tx_freelist);
        dev->tx_buffers[i].page = NULL;
    }

    for(i=0;i<NET_RX_RING_SIZE;i++)
    {
        /* TODO: that's a lot of memory */
        dev->rx_buffers[i].page = (char*)alloc_page();
    }

    txs = (struct netif_tx_sring *) alloc_page();
    rxs = (struct netif_rx_sring *) alloc_page();
    memset(txs,0,PAGE_SIZE);
    memset(rxs,0,PAGE_SIZE);

    SHARED_RING_INIT(txs);
    SHARED_RING_INIT(rxs);
    FRONT_RING_INIT(&dev->tx, txs, PAGE_SIZE);
    FRONT_RING_INIT(&dev->rx, rxs, PAGE_SIZE);

    dev->tx_ring_ref = gnttab_grant_access(0,virt_to_mfn(txs),0);
    dev->rx_ring_ref = gnttab_grant_access(0,virt_to_mfn(rxs),0);

    evtchn_alloc_unbound_t op;
    op.dom = DOMID_SELF;
    snprintf(path, sizeof(path), "%s/backend-id", nodename);
    op.remote_dom = xenbus_read_integer(path);
    HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
    clear_evtchn(op.port); /* Without this, the handler gets invoked now! */
    dev->local_port = bind_evtchn(op.port, netfront_handler, dev);
    dev->evtchn=op.port;

    dev->netif_rx = thenetif_rx;

    // FIXME: proper frees on failures
again:
    err = xenbus_transaction_start(&xbt);
    if (err) {
        printk("Error starting transaction\n");
    }

    err = xenbus_printf(xbt, nodename, "tx-ring-ref","%u",
                dev->tx_ring_ref);
    if (err) {
        message = "writing tx ring-ref";
        goto abort_transaction;
    }
    err = xenbus_printf(xbt, nodename, "rx-ring-ref","%u",
                dev->rx_ring_ref);
    if (err) {
        message = "writing rx ring-ref";
        goto abort_transaction;
    }
    err = xenbus_printf(xbt, nodename,
                "event-channel", "%u", dev->evtchn);
    if (err) {
        message = "writing event-channel";
        goto abort_transaction;
    }

    err = xenbus_printf(xbt, nodename, "request-rx-copy", "%u", 1);

    if (err) {
        message = "writing request-rx-copy";
        goto abort_transaction;
    }

    err = xenbus_printf(xbt, nodename, "state", "%u",
            4); /* connected */
    if (err) {
        message = "switching state";
        goto abort_transaction;
    }

    err = xenbus_transaction_end(xbt, 0, &retry);
    if (retry) {
        printk("completing transaction\n");
        goto again;
    }

    goto done;

abort_transaction:
    printk("Aborting transaction: %s\n", message);
    xenbus_transaction_end(xbt, 1, &retry);
    return NULL;

done:

    snprintf(path, sizeof(path), "%s/backend", nodename);
    msg = xenbus_read(XBT_NIL, path, &dev->backend);
    snprintf(path, sizeof(path), "%s/mac", nodename);
    msg = xenbus_read(XBT_NIL, path, &mac);

    if ((dev->backend == NULL) || (mac == NULL)) {
        struct evtchn_close op = { dev->local_port };
        printk("%s: backend/mac failed\n", __func__);
        unbind_evtchn(dev->local_port);
        HYPERVISOR_event_channel_op(EVTCHNOP_close, &op);
        return NULL;
    }

    printk("backend at %s\n",dev->backend);
    printk("mac is %s\n",mac);

    {
        char path[strlen(dev->backend) + 1 + 5 + 1];
        snprintf(path, sizeof(path), "%s/state", dev->backend);

        xenbus_watch_path(XBT_NIL, path);

        xenbus_wait_for_value(path,"4"); /* wait for backend to connect */

        xenbus_unwatch_path(XBT_NIL, path);
    }

    printk("**************************\n");

    init_rx_buffers(dev);

    /* Special conversion specifier 'hh' needed for __ia64__. Without
       this mini-os panics with 'Unaligned reference'. */
    if (rawmac)
        sscanf(mac,"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
               &rawmac[0],
               &rawmac[1],
               &rawmac[2],
               &rawmac[3],
               &rawmac[4],
               &rawmac[5]);

    return dev;
}
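
/* Tear the connection down by stepping the frontend through states
 * 5 (Closing) and 6 (Closed), waiting for the backend to follow each
 * time, then release local resources. */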
void shutdown_netfront(struct netfront_dev *dev)
{
    char* err;
    char *nodename = dev->nodename;

    char path[strlen(dev->backend) + 1 + 5 + 1];

    printk("close network: backend at %s\n",dev->backend);

    snprintf(path, sizeof(path), "%s/state", dev->backend);
    err = xenbus_printf(XBT_NIL, nodename, "state", "%u", 5); /* closing */
    xenbus_wait_for_value(path,"5");

    err = xenbus_printf(XBT_NIL, nodename, "state", "%u", 6); /* closed */
    xenbus_wait_for_value(path,"6");

    unbind_evtchn(dev->local_port);

    free(nodename);
    free(dev->backend);
    free(dev);
}
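
/* Grant all rx buffer pages to the backend and post one request per
 * buffer so incoming packets have somewhere to land. */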
void init_rx_buffers(struct netfront_dev *dev)
{
    int i, requeue_idx;
    netif_rx_request_t *req;
    int notify;

    /* Rebuild the RX buffer freelist and the RX ring itself. */
    for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++)
    {
        struct net_buffer* buf = &dev->rx_buffers[requeue_idx];
        req = RING_GET_REQUEST(&dev->rx, requeue_idx);

        buf->gref = req->gref =
            gnttab_grant_access(0,virt_to_mfn(buf->page),0);

        req->id = requeue_idx;

        requeue_idx++;
    }

    dev->rx.req_prod_pvt = requeue_idx;

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);

    if (notify)
        notify_remote_via_evtchn(dev->evtchn);

    dev->rx.sring->rsp_event = dev->rx.rsp_cons + 1;
}
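
/* Transmit a single packet: wait for a free tx slot, copy the data
 * into that slot's page, grant the page to the backend and push a
 * request onto the tx ring. */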
void netfront_xmit(struct netfront_dev *dev, unsigned char* data,int len)
{
    int flags;
    struct netif_tx_request *tx;
    RING_IDX i;
    int notify;
    unsigned short id;
    struct net_buffer* buf;
    void* page;

    BUG_ON(len > PAGE_SIZE); /* the packet must fit in a single page */

    down(&dev->tx_sem);

    local_irq_save(flags);
    id = get_id_from_freelist(dev->tx_freelist);
    local_irq_restore(flags);

    buf = &dev->tx_buffers[id];
    page = buf->page;
    if (!page)
        page = buf->page = (char*) alloc_page();

    i = dev->tx.req_prod_pvt;
    tx = RING_GET_REQUEST(&dev->tx, i);

    memcpy(page,data,len);

    buf->gref =
        tx->gref = gnttab_grant_access(0,virt_to_mfn(page),0);

    tx->offset=0;
    tx->size = len;
    tx->flags=0;
    tx->id = id;
    dev->tx.req_prod_pvt = i + 1;

    wmb();

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->tx, notify);

    if(notify) notify_remote_via_evtchn(dev->evtchn);

    local_irq_save(flags);
    network_tx_buf_gc(dev);
    local_irq_restore(flags);
}