ia64/xen-unstable

view extras/mini-os/netfront.c @ 16458:ce3e5e859d66

vt-d: Fix iommu_map_page().
Signed-off-by: Weidong Han <weidong.han@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Sun Nov 25 09:23:02 2007 +0000 (2007-11-25)
parents 7eea09b18839
children 8d406e2813c8
line source
1 /* Minimal network driver for Mini-OS.
2 * Copyright (c) 2006-2007 Jacob Gorm Hansen, University of Copenhagen.
3 * Based on netfront.c from Xen Linux.
4 *
5 * Does not handle fragments or extras.
6 */
8 #include <os.h>
9 #include <xenbus.h>
10 #include <events.h>
11 #include <errno.h>
12 #include <xen/io/netif.h>
13 #include <gnttab.h>
14 #include <xmalloc.h>
15 #include <time.h>
16 #include <semaphore.h>
18 void init_rx_buffers(void);
/* Per-device state for the single virtual interface ("device/vif/0"). */
struct net_info {
    struct netif_tx_front_ring tx;      /* front half of shared TX ring */
    struct netif_rx_front_ring rx;      /* front half of shared RX ring */
    int tx_ring_ref;                    /* grant ref of the TX ring page */
    int rx_ring_ref;                    /* grant ref of the RX ring page */
    unsigned int evtchn, local_port;    /* event channel to the backend */
} net_info;
30 char* xenbus_printf(xenbus_transaction_t xbt,
31 char* node,char* path,
32 char* fmt,unsigned int arg)
33 {
34 char fullpath[256];
35 char val[256];
37 sprintf(fullpath,"%s/%s",node,path);
38 sprintf(val,fmt,arg);
39 xenbus_write(xbt,fullpath,val);
41 return NULL;
42 }
#define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)
#define GRANT_INVALID_REF 0

/* Freelists of unused buffer ids, threaded through the arrays themselves:
 * slot 0 holds the current head id (see add_id_to_freelist). */
unsigned short rx_freelist[NET_RX_RING_SIZE];
unsigned short tx_freelist[NET_TX_RING_SIZE];

/* Counts free TX slots: netfront_xmit down()s it, network_tx_buf_gc up()s. */
__DECLARE_SEMAPHORE_GENERIC(tx_sem, NET_TX_RING_SIZE);

/* One page of packet data plus the grant reference the backend maps it by. */
struct net_buffer {
    void* page;
    int gref;
};
struct net_buffer rx_buffers[NET_RX_RING_SIZE];
struct net_buffer tx_buffers[NET_TX_RING_SIZE];
/* Push 'id' onto the freelist.  freelist[0] is the head id and
 * freelist[id] links to the next free id.  NOTE(review): slot 0 doubles
 * as the head pointer, so the store order below matters (consider id==0)
 * — do not reorder these two assignments. */
static inline void add_id_to_freelist(unsigned int id,unsigned short* freelist)
{
    freelist[id] = freelist[0];
    freelist[0] = id;
}
/* Pop and return the head id from the freelist (inverse of
 * add_id_to_freelist).  Caller must ensure the list is non-empty. */
static inline unsigned short get_id_from_freelist(unsigned short* freelist)
{
    unsigned int id = freelist[0];
    freelist[0] = freelist[id];
    return id;
}
/* Default receive callback — applications override this weak symbol to
 * consume incoming frames.  'data'/'len' point into an RX buffer page
 * and are only valid for the duration of the call. */
__attribute__((weak)) void netif_rx(unsigned char* data,int len)
{
    printk("%d bytes incoming at %p\n",len,data);
}
/* Default application entry point (weak) — called at the end of
 * init_netfront with the start info and the parsed 6-byte MAC. */
__attribute__((weak)) void net_app_main(void*si,unsigned char*mac) {}
/* Map a free-running ring index to an RX ring slot.  The mask idiom
 * assumes NET_RX_RING_SIZE is a power of two (__RING_SIZE is expected
 * to guarantee this — the '& (size - 1)' is wrong otherwise). */
static inline int xennet_rxidx(RING_IDX idx)
{
    return idx & (NET_RX_RING_SIZE - 1);
}
86 void network_rx(void)
87 {
88 struct net_info *np = &net_info;
89 RING_IDX rp,cons;
90 struct netif_rx_response *rx;
93 moretodo:
94 rp = np->rx.sring->rsp_prod;
95 rmb(); /* Ensure we see queued responses up to 'rp'. */
96 cons = np->rx.rsp_cons;
98 int nr_consumed=0;
99 while ((cons != rp))
100 {
101 struct net_buffer* buf;
102 unsigned char* page;
104 rx = RING_GET_RESPONSE(&np->rx, cons);
106 if (rx->flags & NETRXF_extra_info)
107 {
108 printk("+++++++++++++++++++++ we have extras!\n");
109 continue;
110 }
113 if (rx->status == NETIF_RSP_NULL) continue;
115 int id = rx->id;
117 buf = &rx_buffers[id];
118 page = (unsigned char*)buf->page;
119 gnttab_end_access(buf->gref);
121 if(rx->status>0)
122 {
123 netif_rx(page+rx->offset,rx->status);
124 }
126 add_id_to_freelist(id,rx_freelist);
128 nr_consumed++;
130 ++cons;
131 }
132 np->rx.rsp_cons=rp;
134 int more;
135 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx,more);
136 if(more) goto moretodo;
138 RING_IDX req_prod = np->rx.req_prod_pvt;
140 int i;
141 netif_rx_request_t *req;
143 for(i=0; i<nr_consumed; i++)
144 {
145 int id = xennet_rxidx(req_prod + i);
146 req = RING_GET_REQUEST(&np->rx, req_prod + i);
147 struct net_buffer* buf = &rx_buffers[id];
148 void* page = buf->page;
150 buf->gref = req->gref =
151 gnttab_grant_access(0,virt_to_mfn(page),0);
153 req->id = id;
154 }
156 wmb();
158 np->rx.req_prod_pvt = req_prod + i;
160 int notify;
161 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
162 if (notify)
163 notify_remote_via_evtchn(np->evtchn);
165 }
/* Reap completed transmits: end each buffer's grant, return its id to the
 * TX freelist, and release one tx_sem slot per reclaimed buffer so that
 * blocked transmitters in netfront_xmit can proceed. */
void network_tx_buf_gc(void)
{
    RING_IDX cons, prod;
    unsigned short id;
    struct net_info *np = &net_info;

    do {
        prod = np->tx.sring->rsp_prod;
        rmb(); /* Ensure we see responses up to 'rp'. */

        for (cons = np->tx.rsp_cons; cons != prod; cons++)
        {
            struct netif_tx_response *txrsp;

            txrsp = RING_GET_RESPONSE(&np->tx, cons);
            /* NULL responses carry no buffer to reclaim. */
            if (txrsp->status == NETIF_RSP_NULL)
                continue;

            id  = txrsp->id;
            struct net_buffer* buf = &tx_buffers[id];
            gnttab_end_access(buf->gref);
            buf->gref=GRANT_INVALID_REF;

            add_id_to_freelist(id,tx_freelist);
            up(&tx_sem);
        }

        np->tx.rsp_cons = prod;

        /*
         * Set a new event, then check for race with update of tx_cons.
         * Note that it is essential to schedule a callback, no matter
         * how few tx_buffers are pending. Even if there is space in the
         * transmit ring, higher layers may be blocked because too much
         * data is outstanding: in such cases notification from Xen is
         * likely to be the only kick that we'll get.
         */
        np->tx.sring->rsp_event =
            prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
        mb();
        /* Loop again if the backend produced more responses while we were
         * rearming the event; otherwise they could be missed. */
    } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
}
/* Event-channel interrupt handler: reap finished transmits first, then
 * process received frames.  Runs with local interrupts disabled for the
 * whole of both ring walks. */
void netfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
{
    int flags;

    local_irq_save(flags);

    network_tx_buf_gc();
    network_rx();

    local_irq_restore(flags);
}
/* Xenstore path of the backend's directory; set by init_netfront via
 * xenbus_read and freed in shutdown_netfront. */
char* backend;
/* Bring up the virtual NIC: allocate and grant the TX/RX buffer pages and
 * shared rings, create and bind the event channel, publish everything to
 * xenstore under "device/vif/0", wait for the backend to reach state 4
 * (connected), prime the RX ring, and finally hand off to net_app_main()
 * with the parsed MAC address. */
void init_netfront(void* si)
{
    xenbus_transaction_t xbt;
    struct net_info* info = &net_info;
    char* err;
    char* message=NULL;
    char nodename[] = "device/vif/0";
    struct netif_tx_sring *txs;
    struct netif_rx_sring *rxs;
    int retry=0;
    int i;
    char* mac;
    char* msg;

    printk("************************ NETFRONT **********\n\n\n");

    /* One page per TX slot, and each slot id seeded onto the freelist. */
    for(i=0;i<NET_TX_RING_SIZE;i++)
    {
        add_id_to_freelist(i,tx_freelist);
        tx_buffers[i].page = (char*)alloc_page();
    }

    /* Likewise for RX. */
    for(i=0;i<NET_RX_RING_SIZE;i++)
    {
        add_id_to_freelist(i,rx_freelist);
        rx_buffers[i].page = (char*)alloc_page();
    }

    /* Shared rings: one page each, zeroed, then grant-mapped to dom0. */
    txs = (struct netif_tx_sring*) alloc_page();
    rxs = (struct netif_rx_sring *) alloc_page();
    memset(txs,0,PAGE_SIZE);
    memset(rxs,0,PAGE_SIZE);

    SHARED_RING_INIT(txs);
    SHARED_RING_INIT(rxs);
    FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
    FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

    info->tx_ring_ref = gnttab_grant_access(0,virt_to_mfn(txs),0);
    info->rx_ring_ref = gnttab_grant_access(0,virt_to_mfn(rxs),0);

    /* Event channel shared with the backend (domain 0). */
    evtchn_alloc_unbound_t op;
    op.dom = DOMID_SELF;
    op.remote_dom = 0;
    HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
    clear_evtchn(op.port);        /* Without, handler gets invoked now! */
    info->local_port = bind_evtchn(op.port, netfront_handler, NULL);
    info->evtchn=op.port;

    /* Publish ring refs, event channel and state in one transaction;
     * retried from scratch if xenbus reports a conflict. */
again:
    err = xenbus_transaction_start(&xbt);
    if (err) {
        printk("starting transaction\n");
    }

    err = xenbus_printf(xbt, nodename, "tx-ring-ref","%u",
                info->tx_ring_ref);
    if (err) {
        message = "writing tx ring-ref";
        goto abort_transaction;
    }
    err = xenbus_printf(xbt, nodename, "rx-ring-ref","%u",
                info->rx_ring_ref);
    if (err) {
        message = "writing rx ring-ref";
        goto abort_transaction;
    }
    err = xenbus_printf(xbt, nodename,
                "event-channel", "%u", info->evtchn);
    if (err) {
        message = "writing event-channel";
        goto abort_transaction;
    }

    err = xenbus_printf(xbt, nodename, "request-rx-copy", "%u", 1);

    if (err) {
        message = "writing request-rx-copy";
        goto abort_transaction;
    }

    err = xenbus_printf(xbt, nodename, "state", "%u",
            4); /* connected */


    err = xenbus_transaction_end(xbt, 0, &retry);
    if (retry) {
            goto again;
        /* NOTE(review): unreachable — sits after the goto. */
        printk("completing transaction\n");
    }

    goto done;

abort_transaction:
    xenbus_transaction_end(xbt, 1, &retry);

done:

    /* NOTE(review): 'msg' results are never checked; NULL backend/mac
     * below is the only failure signal used. */
    msg = xenbus_read(XBT_NIL, "device/vif/0/backend", &backend);
    msg = xenbus_read(XBT_NIL, "device/vif/0/mac", &mac);

    if ((backend == NULL) || (mac == NULL)) {
        struct evtchn_close op = { info->local_port };
        printk("%s: backend/mac failed\n", __func__);
        unbind_evtchn(info->local_port);
        HYPERVISOR_event_channel_op(EVTCHNOP_close, &op);
        return;
    }

    printk("backend at %s\n",backend);
    printk("mac is %s\n",mac);

    /* Block until the backend also reports state 4 (connected). */
    char path[256];
    sprintf(path,"%s/state",backend);
    xenbus_watch_path(XBT_NIL, path);

    xenbus_wait_for_value(path,"4");

    //free(backend);

    printk("**************************\n");

    init_rx_buffers();

    unsigned char rawmac[6];
        /* Special conversion specifier 'hh' needed for __ia64__. Without
           this mini-os panics with 'Unaligned reference'. */
    sscanf(mac,"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
            &rawmac[0],
            &rawmac[1],
            &rawmac[2],
            &rawmac[3],
            &rawmac[4],
            &rawmac[5]);

    net_app_main(si,rawmac);
}
/* Tear down the interface by walking the xenbus state handshake:
 * announce state 6 (closing), wait for the backend to follow to 6, drop
 * to state 1 (initialising), wait for the backend to reach 2, then unbind
 * all event-channel ports and release the backend path string. */
void shutdown_netfront(void)
{
    //xenbus_transaction_t xbt;
    char* err;
    char nodename[] = "device/vif/0";

    char path[256];

    printk("close network: backend at %s\n",backend);

    err = xenbus_printf(XBT_NIL, nodename, "state", "%u", 6); /* closing */
    sprintf(path,"%s/state",backend);

    xenbus_wait_for_value(path,"6");

    err = xenbus_printf(XBT_NIL, nodename, "state", "%u", 1);

    xenbus_wait_for_value(path,"2");

    unbind_all_ports();

    free(backend);
}
393 void init_rx_buffers(void)
394 {
395 struct net_info* np = &net_info;
396 int i, requeue_idx;
397 netif_rx_request_t *req;
398 int notify;
400 /* Rebuild the RX buffer freelist and the RX ring itself. */
401 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++)
402 {
403 struct net_buffer* buf = &rx_buffers[requeue_idx];
404 req = RING_GET_REQUEST(&np->rx, requeue_idx);
406 buf->gref = req->gref =
407 gnttab_grant_access(0,virt_to_mfn(buf->page),0);
409 req->id = requeue_idx;
411 requeue_idx++;
412 }
414 np->rx.req_prod_pvt = requeue_idx;
416 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
418 if (notify)
419 notify_remote_via_evtchn(np->evtchn);
421 np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
422 }
425 void netfront_xmit(unsigned char* data,int len)
426 {
427 int flags;
428 struct net_info* info = &net_info;
429 struct netif_tx_request *tx;
430 RING_IDX i;
431 int notify;
432 int id;
433 struct net_buffer* buf;
434 void* page;
436 down(&tx_sem);
438 local_irq_save(flags);
440 id = get_id_from_freelist(tx_freelist);
441 buf = &tx_buffers[id];
442 page = buf->page;
444 i = info->tx.req_prod_pvt;
445 tx = RING_GET_REQUEST(&info->tx, i);
447 memcpy(page,data,len);
449 buf->gref =
450 tx->gref = gnttab_grant_access(0,virt_to_mfn(page),0);
452 tx->offset=0;
453 tx->size = len;
454 tx->flags=0;
455 tx->id = id;
456 info->tx.req_prod_pvt = i + 1;
458 wmb();
460 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->tx, notify);
462 if(notify) notify_remote_via_evtchn(info->evtchn);
464 network_tx_buf_gc();
466 local_irq_restore(flags);
467 }