direct-io.hg

view extras/mini-os/netfront.c @ 15481:538c3d8aa4b1

Revert 15471:7ac7f147241405af83e7a9d748cf7b01279734fc

Block-device specifiers in ioemu can contain colons, so always skipping
past the first colon is not a good idea. Better solutions to the blktap
issues are in the pipeline.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Jul 06 15:01:20 2007 +0100 (2007-07-06)
/* Minimal network driver for Mini-OS.
 * Copyright (c) 2006-2007 Jacob Gorm Hansen, University of Copenhagen.
 * Based on netfront.c from Xen Linux.
 *
 * Does not handle fragments or extras.
 */

#include <os.h>
#include <xenbus.h>
#include <events.h>
#include <errno.h>
#include <xen/io/netif.h>
#include <gnttab.h>
#include <xmalloc.h>
#include <time.h>

void init_rx_buffers(void);
struct net_info {
    struct netif_tx_front_ring tx;
    struct netif_rx_front_ring rx;
    int tx_ring_ref;
    int rx_ring_ref;
    unsigned int evtchn, local_port;
} net_info;
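/* Minimal stand-in for the full xenbus_printf(): it formats exactly one
 * argument with 'fmt' and writes the result under 'node/path'. Errors from
 * xenbus_write() are not propagated; the return value is always NULL. */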
char* xenbus_printf(xenbus_transaction_t xbt,
                    char* node, char* path,
                    char* fmt, unsigned int arg)
{
    char fullpath[256];
    char val[256];

    sprintf(fullpath, "%s/%s", node, path);
    sprintf(val, fmt, arg);
    xenbus_write(xbt, fullpath, val);

    return NULL;
}
#define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)
#define GRANT_INVALID_REF 0

unsigned short rx_freelist[NET_RX_RING_SIZE];
unsigned short tx_freelist[NET_TX_RING_SIZE];

struct net_buffer {
    void* page;
    int gref;
};
struct net_buffer rx_buffers[NET_RX_RING_SIZE];
struct net_buffer tx_buffers[NET_TX_RING_SIZE];
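/* The freelists store free buffer ids as a linked list threaded through the
 * array itself: freelist[0] holds the id at the head, and each other entry
 * holds the id of the next free buffer. Note that id 0 therefore shares its
 * slot with the list head; later Mini-OS revisions avoid this by offsetting
 * the ids by one. */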
static inline void add_id_to_freelist(unsigned int id, unsigned short* freelist)
{
    freelist[id] = freelist[0];
    freelist[0] = id;
}

static inline unsigned short get_id_from_freelist(unsigned short* freelist)
{
    unsigned int id = freelist[0];
    freelist[0] = freelist[id];
    return id;
}
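/* Application hooks: both are declared weak so that a program linked with
 * this driver can override them with its own definitions, e.g.:
 *
 *     void netif_rx(unsigned char* data, int len)
 *     {
 *         handle_packet(data, len);   // hypothetical application handler
 *     }
 *
 * netif_rx() is called for every received packet; net_app_main() is called
 * once the frontend is connected, with the start info and the parsed MAC. */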
__attribute__((weak)) void netif_rx(unsigned char* data, int len)
{
    printk("%d bytes incoming at %p\n", len, data);
}

__attribute__((weak)) void net_app_main(void* si, unsigned char* mac) {}

static inline int xennet_rxidx(RING_IDX idx)
{
    return idx & (NET_RX_RING_SIZE - 1);
}
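/* Receive path: consume all pending responses from the shared RX ring, hand
 * each packet to netif_rx(), then re-grant the consumed buffers and post
 * fresh requests back on the ring for the backend. */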
void network_rx(void)
{
    struct net_info *np = &net_info;
    RING_IDX rp, cons;
    struct netif_rx_response *rx;

moretodo:
    rp = np->rx.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */
    cons = np->rx.rsp_cons;

    int nr_consumed = 0;
    while (cons != rp)
    {
        struct net_buffer* buf;
        unsigned char* page;

        rx = RING_GET_RESPONSE(&np->rx, cons);

        if (rx->flags & NETRXF_extra_info)
        {
            printk("+++++++++++++++++++++ we have extras!\n");
            ++cons; /* skip the slot; extras are not supported */
            continue;
        }

        if (rx->status == NETIF_RSP_NULL)
        {
            ++cons;
            continue;
        }

        int id = rx->id;

        buf = &rx_buffers[id];
        page = (unsigned char*)buf->page;
        gnttab_end_access(buf->gref);

        if (rx->status > 0)
        {
            netif_rx(page + rx->offset, rx->status);
        }

        add_id_to_freelist(id, rx_freelist);

        nr_consumed++;

        ++cons;
    }
    np->rx.rsp_cons = rp;

    int more;
    RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more);
    if (more) goto moretodo;

    /* Re-post one request for every buffer we consumed. */
    RING_IDX req_prod = np->rx.req_prod_pvt;

    int i;
    netif_rx_request_t *req;

    for (i = 0; i < nr_consumed; i++)
    {
        int id = xennet_rxidx(req_prod + i);
        req = RING_GET_REQUEST(&np->rx, req_prod + i);
        struct net_buffer* buf = &rx_buffers[id];
        void* page = buf->page;

        buf->gref = req->gref =
            gnttab_grant_access(0, virt_to_mfn(page), 0);

        req->id = id;
    }

    wmb();

    np->rx.req_prod_pvt = req_prod + i;

    int notify;
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
    if (notify)
        notify_remote_via_evtchn(np->evtchn);
}
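/* Transmit completion: reap TX responses, release the grant and the buffer
 * id for each completed packet, and set rsp_event so we are notified again
 * once roughly half of the still-outstanding requests have completed. */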
void network_tx_buf_gc(void)
{
    RING_IDX cons, prod;
    unsigned short id;
    struct net_info *np = &net_info;

    do {
        prod = np->tx.sring->rsp_prod;
        rmb(); /* Ensure we see responses up to 'prod'. */

        for (cons = np->tx.rsp_cons; cons != prod; cons++)
        {
            struct netif_tx_response *txrsp;

            txrsp = RING_GET_RESPONSE(&np->tx, cons);
            if (txrsp->status == NETIF_RSP_NULL)
                continue;

            id = txrsp->id;
            struct net_buffer* buf = &tx_buffers[id];
            gnttab_end_access(buf->gref);
            buf->gref = GRANT_INVALID_REF;

            add_id_to_freelist(id, tx_freelist);
        }

        np->tx.rsp_cons = prod;

        /*
         * Set a new event, then check for race with update of tx_cons.
         * Note that it is essential to schedule a callback, no matter
         * how few tx_buffers are pending. Even if there is space in the
         * transmit ring, higher layers may be blocked because too much
         * data is outstanding: in such cases notification from Xen is
         * likely to be the only kick that we'll get.
         */
        np->tx.sring->rsp_event =
            prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
        mb();
    } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
}
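/* Event-channel upcall: both ring directions share one channel, so each
 * notification garbage-collects the TX ring and then drains the RX ring. */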
void netfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
{
    unsigned long flags;

    local_irq_save(flags);

    network_tx_buf_gc();
    network_rx();

    local_irq_restore(flags);
}

char* backend;
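/* Frontend bring-up: allocate and grant the shared rings, bind an event
 * channel, publish the ring refs and the channel in xenstore inside a
 * transaction, move the device to XenbusStateConnected, then wait for the
 * backend to reach state 4 before handing control to net_app_main(). */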
void init_netfront(void* si)
{
    xenbus_transaction_t xbt;
    struct net_info* info = &net_info;
    char* err;
    char* message = NULL;
    char nodename[] = "device/vif/0";
    struct netif_tx_sring *txs;
    struct netif_rx_sring *rxs;
    int retry = 0;
    int i;
    char* mac;
    char* msg;

    printk("************************ NETFRONT **********\n\n\n");

    for (i = 0; i < NET_TX_RING_SIZE; i++)
    {
        add_id_to_freelist(i, tx_freelist);
        tx_buffers[i].page = (char*)alloc_page();
    }

    for (i = 0; i < NET_RX_RING_SIZE; i++)
    {
        add_id_to_freelist(i, rx_freelist);
        rx_buffers[i].page = (char*)alloc_page();
    }

    txs = (struct netif_tx_sring *)alloc_page();
    rxs = (struct netif_rx_sring *)alloc_page();
    memset(txs, 0, PAGE_SIZE);
    memset(rxs, 0, PAGE_SIZE);

    SHARED_RING_INIT(txs);
    SHARED_RING_INIT(rxs);
    FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
    FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

    info->tx_ring_ref = gnttab_grant_access(0, virt_to_mfn(txs), 0);
    info->rx_ring_ref = gnttab_grant_access(0, virt_to_mfn(rxs), 0);

    evtchn_alloc_unbound_t op;
    op.dom = DOMID_SELF;
    op.remote_dom = 0;
    HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
    clear_evtchn(op.port); /* Otherwise the handler fires immediately. */
    info->local_port = bind_evtchn(op.port, netfront_handler, NULL);
    info->evtchn = op.port;
again:
    err = xenbus_transaction_start(&xbt);
    if (err) {
        printk("starting transaction failed\n");
    }

    err = xenbus_printf(xbt, nodename, "tx-ring-ref", "%u",
                        info->tx_ring_ref);
    if (err) {
        message = "writing tx ring-ref";
        goto abort_transaction;
    }
    err = xenbus_printf(xbt, nodename, "rx-ring-ref", "%u",
                        info->rx_ring_ref);
    if (err) {
        message = "writing rx ring-ref";
        goto abort_transaction;
    }
    err = xenbus_printf(xbt, nodename,
                        "event-channel", "%u", info->evtchn);
    if (err) {
        message = "writing event-channel";
        goto abort_transaction;
    }

    err = xenbus_printf(xbt, nodename, "request-rx-copy", "%u", 1);

    if (err) {
        message = "writing request-rx-copy";
        goto abort_transaction;
    }

    err = xenbus_printf(xbt, nodename, "state", "%u",
                        4); /* XenbusStateConnected */

    err = xenbus_transaction_end(xbt, 0, &retry);
    if (retry) {
        printk("completing transaction\n");
        goto again;
    }

    goto done;

abort_transaction:
    xenbus_transaction_end(xbt, 1, &retry);
done:

    msg = xenbus_read(XBT_NIL, "device/vif/0/backend", &backend);
    msg = xenbus_read(XBT_NIL, "device/vif/0/mac", &mac);

    if ((backend == NULL) || (mac == NULL)) {
        struct evtchn_close op = { info->local_port };
        printk("%s: backend/mac failed\n", __func__);
        unbind_evtchn(info->local_port);
        HYPERVISOR_event_channel_op(EVTCHNOP_close, &op);
        return;
    }

    printk("backend at %s\n", backend);
    printk("mac is %s\n", mac);

    char path[256];
    sprintf(path, "%s/state", backend);

    xenbus_watch_path(XBT_NIL, path);

    xenbus_wait_for_value(path, "4"); /* wait for the backend to connect */

    //free(backend);

    printk("**************************\n");

    init_rx_buffers();

    unsigned char rawmac[6];
    /* The 'hh' conversion specifier is needed for __ia64__. Without it,
       Mini-OS panics with 'Unaligned reference'. */
    sscanf(mac, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
           &rawmac[0],
           &rawmac[1],
           &rawmac[2],
           &rawmac[3],
           &rawmac[4],
           &rawmac[5]);

    net_app_main(si, rawmac);
}
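/* Orderly shutdown: walk the frontend through the xenbus state machine
 * (6 = Closed, then back to 1 = Initialising), waiting for the backend to
 * follow at each step, then release all event-channel ports. */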
void shutdown_netfront(void)
{
    //xenbus_transaction_t xbt;
    char* err;
    char nodename[] = "device/vif/0";

    char path[256];

    printk("close network: backend at %s\n", backend);

    err = xenbus_printf(XBT_NIL, nodename, "state", "%u", 6); /* XenbusStateClosed */
    sprintf(path, "%s/state", backend);

    xenbus_wait_for_value(path, "6");

    err = xenbus_printf(XBT_NIL, nodename, "state", "%u", 1); /* XenbusStateInitialising */

    xenbus_wait_for_value(path, "2"); /* backend in XenbusStateInitWait */

    unbind_all_ports();
}
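/* (Re)stock the RX ring: grant every RX buffer page to the backend, post
 * one request per buffer, and notify the backend if it is waiting. */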
void init_rx_buffers(void)
{
    struct net_info* np = &net_info;
    int i, requeue_idx;
    netif_rx_request_t *req;
    int notify;

    /* Rebuild the RX buffer freelist and the RX ring itself. */
    for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++)
    {
        struct net_buffer* buf = &rx_buffers[requeue_idx];
        req = RING_GET_REQUEST(&np->rx, requeue_idx);

        buf->gref = req->gref =
            gnttab_grant_access(0, virt_to_mfn(buf->page), 0);

        req->id = requeue_idx;

        requeue_idx++;
    }

    np->rx.req_prod_pvt = requeue_idx;

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);

    if (notify)
        notify_remote_via_evtchn(np->evtchn);

    np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
}
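/* Transmit one packet: copy it into a granted page, queue a single request
 * (no fragmentation, so the packet must fit in one page) and kick the
 * backend. */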
void netfront_xmit(unsigned char* data, int len)
{
    unsigned long flags;
    local_irq_save(flags);

    struct net_info* info = &net_info;
    struct netif_tx_request *tx;
    RING_IDX i = info->tx.req_prod_pvt;
    int notify;
    int id = get_id_from_freelist(tx_freelist);
    struct net_buffer* buf = &tx_buffers[id];
    void* page = buf->page;

    tx = RING_GET_REQUEST(&info->tx, i);

    memcpy(page, data, len); /* caller must ensure len <= PAGE_SIZE */

    buf->gref =
        tx->gref = gnttab_grant_access(0, virt_to_mfn(page), 0);

    tx->offset = 0;
    tx->size = len;
    tx->flags = 0;
    tx->id = id;
    info->tx.req_prod_pvt = i + 1;

    wmb();

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->tx, notify);

    if (notify) notify_remote_via_evtchn(info->evtchn);

    network_tx_buf_gc();

    local_irq_restore(flags);
}