ia64/xen-unstable

view extras/mini-os/netfront.c @ 16513:b1da8762f853

blktap: remove unused headers.

The attached patch removes unused Linux-specific headers
and makes bswap.h ready for BSD support.

This is the first step towards BSD support in blktap. More to come.
No functional change.

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Dec 04 10:48:28 2007 +0000 (2007-12-04)
parents 8d406e2813c8
children 76d88d1da324
/* Minimal network driver for Mini-OS.
 * Copyright (c) 2006-2007 Jacob Gorm Hansen, University of Copenhagen.
 * Based on netfront.c from Xen Linux.
 *
 * Does not handle fragments or extras.
 */

#include <os.h>
#include <xenbus.h>
#include <events.h>
#include <errno.h>
#include <xen/io/netif.h>
#include <gnttab.h>
#include <xmalloc.h>
#include <time.h>
#include <semaphore.h>
void init_rx_buffers(void);
struct net_info {
    struct netif_tx_front_ring tx;
    struct netif_rx_front_ring rx;
    int tx_ring_ref;
    int rx_ring_ref;
    unsigned int evtchn, local_port;
} net_info;
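
/* Minimal xenbus_printf: formats a single value into "<node>/<path>" in
 * the store. Note it handles exactly one argument and always reports
 * success by returning NULL. */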
char* xenbus_printf(xenbus_transaction_t xbt,
                    char* node, char* path,
                    char* fmt, unsigned int arg)
{
    char fullpath[256];
    char val[256];

    sprintf(fullpath, "%s/%s", node, path);
    sprintf(val, fmt, arg);
    xenbus_write(xbt, fullpath, val);

    return NULL;
}
#define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)
#define GRANT_INVALID_REF 0
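
/* Per-ring freelists of buffer ids, plus a counting semaphore that tracks
 * free transmit slots so senders block once the tx ring is full. */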
unsigned short rx_freelist[NET_RX_RING_SIZE];
unsigned short tx_freelist[NET_TX_RING_SIZE];
__DECLARE_SEMAPHORE_GENERIC(tx_sem, NET_TX_RING_SIZE);
struct net_buffer {
    void* page;
    int gref;
};
struct net_buffer rx_buffers[NET_RX_RING_SIZE];
struct net_buffer tx_buffers[NET_TX_RING_SIZE];
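
/* The freelists chain free ids in place: entry 0 holds the id at the head
 * of the list, and each free entry holds the id of the next free slot. */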
static inline void add_id_to_freelist(unsigned int id, unsigned short* freelist)
{
    freelist[id] = freelist[0];
    freelist[0] = id;
}

static inline unsigned short get_id_from_freelist(unsigned short* freelist)
{
    unsigned int id = freelist[0];
    freelist[0] = freelist[id];
    return id;
}
__attribute__((weak)) void netif_rx(unsigned char* data, int len)
{
    printk("%d bytes incoming at %p\n", len, data);
}

__attribute__((weak)) void net_app_main(void* si, unsigned char* mac) {}
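
/* The two weak symbols above are the driver's upcall interface: an
 * application provides strong definitions to receive packets and to be
 * started once the device is connected. A minimal sketch of such hooks,
 * in hypothetical application code (a separate file, not this driver):
 *
 *     void netif_rx(unsigned char* data, int len)
 *     {
 *         // Called from the event-channel handler for each received frame.
 *         printk("got %d-byte frame\n", len);
 *     }
 *
 *     void net_app_main(void* si, unsigned char* mac)
 *     {
 *         // Called at the end of init_netfront with the parsed 6-byte MAC.
 *         printk("netfront up, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
 *                mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
 *     }
 */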
static inline int xennet_rxidx(RING_IDX idx)
{
    return idx & (NET_RX_RING_SIZE - 1);
}
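
/* Drain the receive ring: hand each response's payload to netif_rx(),
 * recycle the buffer id, then repost one request per consumed buffer and
 * notify the backend if it is waiting. */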
void network_rx(void)
{
    struct net_info *np = &net_info;
    RING_IDX rp, cons;
    struct netif_rx_response *rx;

moretodo:
    rp = np->rx.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */
    cons = np->rx.rsp_cons;

    int nr_consumed = 0;
    while (cons != rp)
    {
        struct net_buffer* buf;
        unsigned char* page;

        rx = RING_GET_RESPONSE(&np->rx, cons);

        if (rx->flags & NETRXF_extra_info)
        {
            printk("+++++++++++++++++++++ we have extras!\n");
            ++cons; /* skip the slot, or the loop never terminates */
            continue;
        }

        if (rx->status == NETIF_RSP_NULL)
        {
            ++cons;
            continue;
        }

        int id = rx->id;

        buf = &rx_buffers[id];
        page = (unsigned char*)buf->page;
        gnttab_end_access(buf->gref);

        if (rx->status > 0)
        {
            netif_rx(page + rx->offset, rx->status);
        }

        add_id_to_freelist(id, rx_freelist);

        nr_consumed++;

        ++cons;
    }
    np->rx.rsp_cons = rp;

    int more;
    RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more);
    if (more)
        goto moretodo;

    RING_IDX req_prod = np->rx.req_prod_pvt;

    int i;
    netif_rx_request_t *req;

    for (i = 0; i < nr_consumed; i++)
    {
        int id = xennet_rxidx(req_prod + i);
        req = RING_GET_REQUEST(&np->rx, req_prod + i);
        struct net_buffer* buf = &rx_buffers[id];
        void* page = buf->page;

        /* We are sure to have free gnttab entries since they got released above */
        buf->gref = req->gref =
            gnttab_grant_access(0, virt_to_mfn(page), 0);

        req->id = id;
    }

    wmb();

    np->rx.req_prod_pvt = req_prod + i;

    int notify;
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
    if (notify)
        notify_remote_via_evtchn(np->evtchn);
}
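
/* Reclaim buffers for transmit requests the backend has responded to:
 * end grant access, return the id to the freelist, and wake one blocked
 * sender per reclaimed slot. */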
void network_tx_buf_gc(void)
{
    RING_IDX cons, prod;
    unsigned short id;
    struct net_info *np = &net_info;

    do {
        prod = np->tx.sring->rsp_prod;
        rmb(); /* Ensure we see responses up to 'prod'. */

        for (cons = np->tx.rsp_cons; cons != prod; cons++)
        {
            struct netif_tx_response *txrsp;

            txrsp = RING_GET_RESPONSE(&np->tx, cons);
            if (txrsp->status == NETIF_RSP_NULL)
                continue;

            id = txrsp->id;
            struct net_buffer* buf = &tx_buffers[id];
            gnttab_end_access(buf->gref);
            buf->gref = GRANT_INVALID_REF;

            add_id_to_freelist(id, tx_freelist);
            up(&tx_sem);
        }

        np->tx.rsp_cons = prod;

        /*
         * Set a new event, then check for race with update of tx_cons.
         * Note that it is essential to schedule a callback, no matter
         * how few tx_buffers are pending. Even if there is space in the
         * transmit ring, higher layers may be blocked because too much
         * data is outstanding: in such cases notification from Xen is
         * likely to be the only kick that we'll get.
         */
        np->tx.sring->rsp_event =
            prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
        mb();
    } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
}
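
/* Event-channel upcall: services both ring directions with interrupts
 * disabled. */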
void netfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
{
    int flags;

    local_irq_save(flags);

    network_tx_buf_gc();
    network_rx();

    local_irq_restore(flags);
}
char* backend;
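
/* Bring up the device: allocate and grant the two shared rings, bind an
 * unbound event channel, publish tx-ring-ref, rx-ring-ref, event-channel
 * and request-rx-copy under device/vif/0 in one xenbus transaction, move
 * to state 4 (connected), then wait for the backend to reach 4 as well. */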
void init_netfront(void* si)
{
    xenbus_transaction_t xbt;
    struct net_info* info = &net_info;
    char* err;
    char* message = NULL;
    char nodename[] = "device/vif/0";
    struct netif_tx_sring *txs;
    struct netif_rx_sring *rxs;
    int retry = 0;
    int i;
    char* mac;
    char* msg;

    printk("************************ NETFRONT **********\n\n\n");

    for (i = 0; i < NET_TX_RING_SIZE; i++)
    {
        add_id_to_freelist(i, tx_freelist);
        tx_buffers[i].page = (char*)alloc_page();
    }

    for (i = 0; i < NET_RX_RING_SIZE; i++)
    {
        add_id_to_freelist(i, rx_freelist);
        rx_buffers[i].page = (char*)alloc_page();
    }

    txs = (struct netif_tx_sring *)alloc_page();
    rxs = (struct netif_rx_sring *)alloc_page();
    memset(txs, 0, PAGE_SIZE);
    memset(rxs, 0, PAGE_SIZE);

    SHARED_RING_INIT(txs);
    SHARED_RING_INIT(rxs);
    FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
    FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

    info->tx_ring_ref = gnttab_grant_access(0, virt_to_mfn(txs), 0);
    info->rx_ring_ref = gnttab_grant_access(0, virt_to_mfn(rxs), 0);

    evtchn_alloc_unbound_t op;
    op.dom = DOMID_SELF;
    op.remote_dom = 0;
    HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
    clear_evtchn(op.port); /* Without this, the handler is invoked immediately! */
    info->local_port = bind_evtchn(op.port, netfront_handler, NULL);
    info->evtchn = op.port;

again:
    err = xenbus_transaction_start(&xbt);
    if (err) {
        printk("starting transaction\n");
    }

    err = xenbus_printf(xbt, nodename, "tx-ring-ref", "%u",
                        info->tx_ring_ref);
    if (err) {
        message = "writing tx ring-ref";
        goto abort_transaction;
    }
    err = xenbus_printf(xbt, nodename, "rx-ring-ref", "%u",
                        info->rx_ring_ref);
    if (err) {
        message = "writing rx ring-ref";
        goto abort_transaction;
    }
    err = xenbus_printf(xbt, nodename,
                        "event-channel", "%u", info->evtchn);
    if (err) {
        message = "writing event-channel";
        goto abort_transaction;
    }

    err = xenbus_printf(xbt, nodename, "request-rx-copy", "%u", 1);

    if (err) {
        message = "writing request-rx-copy";
        goto abort_transaction;
    }

    err = xenbus_printf(xbt, nodename, "state", "%u",
                        4); /* connected */

    err = xenbus_transaction_end(xbt, 0, &retry);
    if (retry) {
        goto again;
    }

    goto done;

abort_transaction:
    xenbus_transaction_end(xbt, 1, &retry);

done:
    msg = xenbus_read(XBT_NIL, "device/vif/0/backend", &backend);
    msg = xenbus_read(XBT_NIL, "device/vif/0/mac", &mac);

    if ((backend == NULL) || (mac == NULL)) {
        struct evtchn_close op = { info->local_port };
        printk("%s: backend/mac failed\n", __func__);
        unbind_evtchn(info->local_port);
        HYPERVISOR_event_channel_op(EVTCHNOP_close, &op);
        return;
    }

    printk("backend at %s\n", backend);
    printk("mac is %s\n", mac);

    char path[256];
    sprintf(path, "%s/state", backend);

    xenbus_watch_path(XBT_NIL, path);

    xenbus_wait_for_value(path, "4");

    //free(backend);

    printk("**************************\n");

    init_rx_buffers();

    unsigned char rawmac[6];
    /* The 'hh' conversion specifier is needed on __ia64__. Without it
       Mini-OS panics with 'Unaligned reference'. */
    sscanf(mac, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
           &rawmac[0],
           &rawmac[1],
           &rawmac[2],
           &rawmac[3],
           &rawmac[4],
           &rawmac[5]);

    net_app_main(si, rawmac);
}
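
/* Tear down by walking the XenBus state machine: announce state 6
 * (closing), wait for the backend to follow, drop back to state 1 and
 * wait for the backend's state 2 before releasing the event channels. */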
void shutdown_netfront(void)
{
    //xenbus_transaction_t xbt;
    char* err;
    char nodename[] = "device/vif/0";

    char path[256];

    printk("close network: backend at %s\n", backend);

    err = xenbus_printf(XBT_NIL, nodename, "state", "%u", 6); /* closing */
    sprintf(path, "%s/state", backend);

    xenbus_wait_for_value(path, "6");

    err = xenbus_printf(XBT_NIL, nodename, "state", "%u", 1);

    xenbus_wait_for_value(path, "2");

    unbind_all_ports();

    free(backend);
}
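
/* Post one receive request per buffer so the backend has somewhere to
 * copy incoming packets before the first interrupt arrives. */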
void init_rx_buffers(void)
{
    struct net_info* np = &net_info;
    int i, requeue_idx;
    netif_rx_request_t *req;
    int notify;

    /* Rebuild the RX buffer freelist and the RX ring itself. */
    for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++)
    {
        struct net_buffer* buf = &rx_buffers[requeue_idx];
        req = RING_GET_REQUEST(&np->rx, requeue_idx);

        buf->gref = req->gref =
            gnttab_grant_access(0, virt_to_mfn(buf->page), 0);

        req->id = requeue_idx;

        requeue_idx++;
    }

    np->rx.req_prod_pvt = requeue_idx;

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);

    if (notify)
        notify_remote_via_evtchn(np->evtchn);

    np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
}
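
/* Transmit one packet: block until a slot is free, copy the frame into a
 * granted page, queue a single request (no fragments), notify the backend
 * if needed, and opportunistically garbage-collect completed requests. */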
void netfront_xmit(unsigned char* data, int len)
{
    int flags;
    struct net_info* info = &net_info;
    struct netif_tx_request *tx;
    RING_IDX i;
    int notify;
    int id;
    struct net_buffer* buf;
    void* page;

    down(&tx_sem);

    local_irq_save(flags);
    id = get_id_from_freelist(tx_freelist);
    local_irq_restore(flags);

    buf = &tx_buffers[id];
    page = buf->page;

    i = info->tx.req_prod_pvt;
    tx = RING_GET_REQUEST(&info->tx, i);

    memcpy(page, data, len);

    buf->gref =
        tx->gref = gnttab_grant_access(0, virt_to_mfn(page), 0);

    tx->offset = 0;
    tx->size = len;
    tx->flags = 0;
    tx->id = id;
    info->tx.req_prod_pvt = i + 1;

    wmb();

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->tx, notify);

    if (notify)
        notify_remote_via_evtchn(info->evtchn);

    local_irq_save(flags);
    network_tx_buf_gc();
    local_irq_restore(flags);
}
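
/* Typical usage from a Mini-OS application (a sketch only; 'si' is the
 * start-info pointer passed to the application thread, and the frame
 * below is purely illustrative):
 *
 *     init_netfront(si);                  // connects, then calls net_app_main()
 *     unsigned char frame[60] = { 0 };    // a pre-built Ethernet frame
 *     netfront_xmit(frame, sizeof(frame));
 *     ...
 *     shutdown_netfront();
 */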