win-pvdrivers

view xennet/xennet_rx.c @ 222:d37e4b226919

Almost working - rx offload packet splitting is working but not yet calculating checksums correctly. There may still be an occasional crash on init too from xennet_oid...
author James Harper <james.harper@bendigoit.com.au>
date Tue Mar 25 08:24:41 2008 +1100 (2008-03-25)
parents 03fcf506d609
children 26f8a3615539
line source
1 /*
2 PV Net Driver for Windows Xen HVM Domains
3 Copyright (C) 2007 James Harper
4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
21 #include "xennet.h"
23 static PMDL
24 get_page_from_freelist(struct xennet_info *xi)
25 {
26 PMDL mdl;
28 // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
30 if (xi->page_free == 0)
31 {
32 mdl = AllocatePagesExtra(1, sizeof(grant_ref_t));
33 *(grant_ref_t *)(((UCHAR *)mdl) + MmSizeOfMdl(0, PAGE_SIZE)) = xi->XenInterface.GntTbl_GrantAccess(
34 xi->XenInterface.InterfaceHeader.Context, 0,
35 *MmGetMdlPfnArray(mdl), FALSE, 0);
36 // KdPrint(("New Mdl = %p, MmGetMdlVirtualAddress = %p, MmGetSystemAddressForMdlSafe = %p\n",
37 // mdl, MmGetMdlVirtualAddress(mdl), MmGetSystemAddressForMdlSafe(mdl, NormalPagePriority)));
38 }
39 else
40 {
41 xi->page_free--;
42 mdl = xi->page_list[xi->page_free];
43 // KdPrint(("Old Mdl = %p, MmGetMdlVirtualAddress = %p, MmGetSystemAddressForMdlSafe = %p\n",
44 // mdl, MmGetMdlVirtualAddress(mdl), MmGetSystemAddressForMdlSafe(mdl, NormalPagePriority)));
45 }
47 // KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
49 return mdl;
50 }
52 static VOID
53 free_page_freelist(struct xennet_info *xi)
54 {
55 PMDL mdl;
56 // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
58 while(xi->page_free != 0)
59 {
60 xi->page_free--;
61 mdl = xi->page_list[xi->page_free];
62 xi->XenInterface.GntTbl_EndAccess(xi->XenInterface.InterfaceHeader.Context,
63 *(grant_ref_t *)(((UCHAR *)mdl) + MmSizeOfMdl(0, PAGE_SIZE)), 0);
64 FreePages(mdl);
65 }
66 }
68 static VOID
69 put_page_on_freelist(struct xennet_info *xi, PMDL mdl)
70 {
71 // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
73 // KdPrint(("Mdl = %p\n", mdl));
75 xi->page_list[xi->page_free] = mdl;
76 xi->page_free++;
78 // KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
79 }
81 static __inline grant_ref_t
82 get_grant_ref(PMDL mdl)
83 {
84 return *(grant_ref_t *)(((UCHAR *)mdl) + MmSizeOfMdl(0, PAGE_SIZE));
85 }
87 // Called at DISPATCH_LEVEL with rx lock held
/* Replenish the rx ring: post empty buffers to netback until the number
   of outstanding requests reaches xi->rx_target, or until we run out of
   ring ids / free pages. */
88 static NDIS_STATUS
89 XenNet_RxBufferAlloc(struct xennet_info *xi)
90 {
91 unsigned short id;
92 PMDL mdl;
93 int i, batch_target, notify;
94 RING_IDX req_prod = xi->rx.req_prod_pvt;
95 netif_rx_request_t *req;
96 int cycles = 0;
97 #if defined(XEN_PROFILE)
98 LARGE_INTEGER tsc, dummy;
99 #endif
101 //KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
102 #if defined(XEN_PROFILE)
103 tsc = KeQueryPerformanceCounter(&dummy);
104 #endif
/* Requests already in flight = req_prod - rsp_cons; top up to rx_target. */
106 batch_target = xi->rx_target - (req_prod - xi->rx.rsp_cons);
108 for (i = 0; i < batch_target; i++)
109 {
110 ASSERT(cycles++ < 256);
/* Stop early if there are no free ring ids or no free pages left. */
111 if (xi->rx_id_free == 0)
112 break;
113 mdl = get_page_from_freelist(xi);
114 if (mdl == NULL)
115 break;
116 xi->rx_id_free--;
118 /* Give to netback */
/* id doubles as the slot index in xi->rx_buffers, so the response with
   this id maps straight back to the MDL we posted. */
119 id = (USHORT)((req_prod + i) & (NET_RX_RING_SIZE - 1));
120 // KdPrint((__DRIVER_NAME " id = %d\n", id));
121 ASSERT(xi->rx_buffers[id] == NULL);
122 xi->rx_buffers[id] = mdl;
123 req = RING_GET_REQUEST(&xi->rx, req_prod + i);
124 req->gref = get_grant_ref(mdl);
125 req->id = id;
126 }
/* Publish the new requests; kick the backend only if it asked for it. */
128 xi->rx.req_prod_pvt = req_prod + i;
129 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->rx, notify);
130 if (notify)
131 {
132 xi->XenInterface.EvtChn_Notify(xi->XenInterface.InterfaceHeader.Context,
133 xi->event_channel);
134 }
136 //KdPrint((__DRIVER_NAME " Added %d out of %d buffers to rx ring\n", i, batch_target));
138 // KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
140 #if defined(XEN_PROFILE)
141 ProfTime_RxBufferAlloc.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
142 ProfCount_RxBufferAlloc++;
143 #endif
145 return NDIS_STATUS_SUCCESS;
146 }
/* Network (big-endian) byte-order accessors.  Each macro must be given
   an *lvalue* — the first byte of the field in the buffer — because
   __NET_USHORT_BYTE_1 takes the address of its argument to read the
   byte that follows it in memory.  They do NOT work on arbitrary
   rvalue expressions.
   GET_NET_USHORT(buf[n]) reads buf[n..n+1] as a big-endian USHORT;
   GET_NET_ULONG(buf[n]) reads buf[n..n+3] as a big-endian ULONG.
   SET_NET_USHORT/SET_NET_ULONG write a host value into the buffer in
   network order — NOTE(review): the stores assume a little-endian host
   (x86/x64), which is the only platform this driver targets. */
148 #define __NET_USHORT_BYTE_0(x) ((USHORT)(x & 0xFF))
149 #define __NET_USHORT_BYTE_1(x) ((USHORT)((PUCHAR)&x)[1] & 0xFF)
151 #define GET_NET_USHORT(x) ((__NET_USHORT_BYTE_0(x) << 8) | __NET_USHORT_BYTE_1(x))
152 #define SET_NET_USHORT(y, x) *((USHORT *)&(y)) = ((__NET_USHORT_BYTE_0(x) << 8) | __NET_USHORT_BYTE_1(x))
154 #define GET_NET_ULONG(x) ((GET_NET_USHORT(x) << 16) | GET_NET_USHORT(((PUCHAR)&x)[2]))
155 #define SET_NET_ULONG(y, x) *((ULONG *)&(y)) = ((GET_NET_USHORT(x) << 16) | GET_NET_USHORT(((PUCHAR)&x)[2]))
/* Parse the Ethernet/IPv4/TCP headers of the packet currently being
   assembled in xi->rxpi and fill in the derived fields (ip_version,
   ip4_header_length, ip_proto, ip4_length, tcp_*).  Sets split_required
   when a GSO mss is in effect and the TCP payload exceeds it.  Returns
   early (leaving rxpi fields at their zeroed defaults) for non-IPv4,
   non-TCP/UDP, or too-short first buffers. */
157 static VOID
158 XenNet_ParseHeader(
159 struct xennet_info *xi
160 )
161 {
/* NOTE(review): i and mdl are unused here. */
162 USHORT i;
163 PMDL mdl;
164 UINT header_length;
166 // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
168 ASSERT(xi->rxpi.mdls[0]);
/* All header parsing below reads through rxpi.header, which points at
   the first buffer's data. */
170 NdisQueryBufferSafe(xi->rxpi.mdls[0], &xi->rxpi.header, &header_length, NormalPagePriority);
172 if (header_length < XN_HDR_SIZE + 20 + 20) // minimum size of first buffer is ETH + IP + TCP header
173 {
174 return;
175 }
/* Bytes 12-13 of the Ethernet header are the EtherType. */
177 switch (GET_NET_USHORT(xi->rxpi.header[12])) // L2 protocol field
178 {
179 case 0x0800:
/* High nibble of the first IP byte is the version. */
180 xi->rxpi.ip_version = (xi->rxpi.header[XN_HDR_SIZE + 0] & 0xF0) >> 4;
181 if (xi->rxpi.ip_version != 4)
182 {
183 KdPrint((__DRIVER_NAME " ip_version = %d\n", xi->rxpi.ip_version));
184 return;
185 }
/* IHL is in 32-bit words; << 2 converts to bytes. */
186 xi->rxpi.ip4_header_length = (xi->rxpi.header[XN_HDR_SIZE + 0] & 0x0F) << 2;
187 if (header_length < (ULONG)(xi->rxpi.ip4_header_length + 20))
188 {
189 KdPrint((__DRIVER_NAME " first packet is only %d long, must be >= %d\n", XN_HDR_SIZE + header_length, (ULONG)(XN_HDR_SIZE + xi->rxpi.ip4_header_length + 20)));
190 // we need to do something conclusive here...
191 return;
192 }
193 break;
194 default:
195 // KdPrint((__DRIVER_NAME " Not IP\n"));
196 return;
197 }
/* Byte 9 of the IP header is the protocol number. */
198 xi->rxpi.ip_proto = xi->rxpi.header[XN_HDR_SIZE + 9];
199 switch (xi->rxpi.ip_proto)
200 {
201 case 6: // TCP
202 case 17: // UDP
203 break;
204 default:
205 return;
206 }
/* IP total length (bytes 2-3 of the IP header, network order). */
207 xi->rxpi.ip4_length = GET_NET_USHORT(xi->rxpi.header[XN_HDR_SIZE + 2]);
/* TCP data offset: high nibble of byte 12 of the TCP header, in 32-bit
   words; (x & 0xf0) >> 2 scales it to bytes in one shift.
   NOTE(review): these tcp_* fields are computed even for UDP (proto 17),
   where byte 12 onward is payload — harmless for the non-split path but
   worth confirming. */
208 xi->rxpi.tcp_header_length = (xi->rxpi.header[XN_HDR_SIZE + xi->rxpi.ip4_header_length + 12] & 0xf0) >> 2;
209 xi->rxpi.tcp_length = xi->rxpi.ip4_length - xi->rxpi.ip4_header_length - xi->rxpi.tcp_header_length;
210 xi->rxpi.tcp_remaining = xi->rxpi.tcp_length;
211 xi->rxpi.tcp_seq = GET_NET_ULONG(xi->rxpi.header[XN_HDR_SIZE + xi->rxpi.ip4_header_length + 4]);
/* A GSO extra-info slot set rxpi.mss earlier; payloads larger than the
   mss must be re-segmented before indication to NDIS. */
212 if (xi->rxpi.mss > 0 && xi->rxpi.tcp_length > xi->rxpi.mss)
213 xi->rxpi.split_required = TRUE;
214 // KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
215 }
217 /*
218 Windows appears to insist that the checksum on received packets is correct, and won't
219 believe us when we lie about it, which happens when the packet is generated on the
220 same bridge in Dom0. Doh!
221 This is only for TCP and UDP packets. IP checksums appear to be correct anyways.
222 */
223 static VOID
224 XenNet_SumPacket(
225 struct xennet_info *xi,
226 PNDIS_PACKET packet
227 )
228 {
229 USHORT i;
230 PUCHAR buffer;
231 PMDL mdl;
232 UINT total_length;
233 UINT buffer_length;
234 USHORT buffer_offset;
235 ULONG csum, pre_csum;
236 PUSHORT csum_ptr;
237 USHORT remaining;
238 USHORT tcp_length;
240 KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
242 #if defined(XEN_PROFILE)
243 ProfCount_RxPacketsCsumOffload++;
244 #endif
246 NdisGetFirstBufferFromPacketSafe(packet, &mdl, &buffer, &buffer_length, &total_length, NormalPagePriority);
247 ASSERT(mdl);
249 csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + xi->rxpi.ip4_header_length + 16];
250 *csum_ptr = 0;
252 ASSERT((USHORT)(xi->rxpi.ip4_length + XN_HDR_SIZE) == xi->rxpi.total_length);
254 remaining = xi->rxpi.ip4_length - xi->rxpi.ip4_header_length - xi->rxpi.tcp_header_length;
255 // TODO: pre-calc a sum of the header...
256 pre_csum = 0;
257 pre_csum += GET_NET_USHORT(buffer[XN_HDR_SIZE + 12]) + GET_NET_USHORT(buffer[XN_HDR_SIZE + 14]);
258 pre_csum += GET_NET_USHORT(buffer[XN_HDR_SIZE + 16]) + GET_NET_USHORT(buffer[XN_HDR_SIZE + 18]);
259 pre_csum += ((USHORT)buffer[XN_HDR_SIZE + 9]);
261 remaining = xi->rxpi.ip4_length - xi->rxpi.ip4_header_length;
263 csum = pre_csum + remaining;
265 for (buffer_offset = i = XN_HDR_SIZE + xi->rxpi.ip4_header_length; i < total_length - 1; i += 2, buffer_offset += 2)
266 {
267 if (buffer_offset == buffer_length - 1) // deal with a buffer ending on an odd byte boundary
268 {
269 csum += (USHORT)buffer[buffer_offset] << 8;
270 NdisGetNextBuffer(mdl, &mdl);
271 if (mdl == NULL)
272 {
273 KdPrint((__DRIVER_NAME " Ran out of buffers\n"));
274 return;
275 }
276 NdisQueryBufferSafe(mdl, &buffer, &buffer_length, NormalPagePriority);
277 KdPrint((__DRIVER_NAME " New buffer - unaligned...\n"));
278 csum += ((USHORT)buffer[0]);
279 buffer_offset = -1;
280 }
281 else
282 {
283 if (buffer_offset == buffer_length)
284 {
285 KdPrint((__DRIVER_NAME " New buffer - aligned...\n"));
286 NdisGetNextBuffer(mdl, &mdl);
287 if (mdl == NULL)
288 {
289 KdPrint((__DRIVER_NAME " Ran out of buffers\n"));
290 return;
291 }
292 NdisQueryBufferSafe(mdl, &buffer, &buffer_length, NormalPagePriority);
293 buffer_offset = 0;
294 }
295 csum += GET_NET_USHORT(buffer[buffer_offset]);
296 //KdPrint((__DRIVER_NAME " %04X\n", GET_NET_USHORT(buffer[buffer_offset])));
297 }
298 }
299 if (i != total_length) // last odd byte
300 {
301 //KdPrint((__DRIVER_NAME " *%04X\n", (USHORT)buffer[buffer_offset] << 8));
302 csum += ((USHORT)buffer[buffer_offset] << 8);
303 }
304 while (csum & 0xFFFF0000)
305 csum = (csum & 0xFFFF) + (csum >> 16);
306 *csum_ptr = (USHORT)~GET_NET_USHORT(csum);
308 KdPrint((__DRIVER_NAME " csum = %04x\n", *csum_ptr));
310 KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
311 }
313 static PUCHAR
314 XenNet_GetData(
315 struct xennet_info *xi,
316 USHORT req_length,
317 PUSHORT length
318 )
319 {
320 PNDIS_BUFFER mdl = xi->rxpi.mdls[xi->rxpi.curr_mdl];
321 PUCHAR buffer = (PUCHAR)MmGetMdlVirtualAddress(mdl) + xi->rxpi.curr_mdl_offset;
323 KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
325 *length = min(req_length, MmGetMdlByteCount(mdl) - xi->rxpi.curr_mdl_offset);
327 KdPrint((__DRIVER_NAME " req_length = %d, length = %d\n", req_length, *length));
329 xi->rxpi.curr_mdl_offset += *length;
330 if (xi->rxpi.curr_mdl_offset == MmGetMdlByteCount(mdl))
331 {
332 xi->rxpi.curr_mdl++;
333 xi->rxpi.curr_mdl_offset = 0;
334 }
336 KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
338 return buffer;
339 }
/* Build one NDIS packet from the fragments in xi->rxpi.  In the normal
   case the received MDLs are chained into the packet as-is.  When
   split_required is set (GSO packet larger than mss), a fresh page is
   filled with a copy of the headers — with IP total length and TCP
   sequence number rewritten — followed by up to mss bytes of payload,
   consuming the rxpi data cursor (XenNet_GetData). */
341 static PNDIS_PACKET
342 XenNet_MakePacket(
343 struct xennet_info *xi
344 )
345 {
346 PNDIS_PACKET packet;
347 PUCHAR in_buffer;
348 PNDIS_BUFFER out_mdl;
349 PUCHAR out_buffer;
350 USHORT out_offset;
351 USHORT out_remaining;
352 USHORT length;
353 USHORT new_ip4_length;
354 NDIS_STATUS status;
355 int i;
357 KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
359 NdisAllocatePacket(&status, &packet, xi->packet_pool);
360 ASSERT(status == NDIS_STATUS_SUCCESS);
361 NDIS_SET_PACKET_HEADER_SIZE(packet, XN_HDR_SIZE);
363 if (!xi->rxpi.split_required)
364 {
/* Fast path: hand the received buffers to NDIS unchanged. */
365 for (i = 0; i < xi->rxpi.mdl_count; i++)
366 NdisChainBufferAtBack(packet, xi->rxpi.mdls[i]);
367 NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
368 }
369 else
370 {
/* Split path: copy headers + one mss-sized slice of payload into a
   fresh page.  NOTE(review): out_mdl is not checked for NULL — a failed
   freelist allocation would crash here; confirm and guard. */
371 out_mdl = get_page_from_freelist(xi);
372 out_buffer = MmGetMdlVirtualAddress(out_mdl);
373 out_offset = XN_HDR_SIZE + xi->rxpi.ip4_header_length + xi->rxpi.tcp_header_length;
374 out_remaining = min(xi->rxpi.mss, xi->rxpi.tcp_remaining);
375 NdisAdjustBufferLength(out_mdl, out_offset + out_remaining);
376 memcpy(out_buffer, xi->rxpi.header, out_offset);
/* Rewrite IP total length and TCP sequence number for this segment. */
377 new_ip4_length = out_remaining + xi->rxpi.ip4_header_length + xi->rxpi.tcp_header_length;
378 SET_NET_USHORT(out_buffer[XN_HDR_SIZE + 2], new_ip4_length);
379 SET_NET_ULONG(out_buffer[XN_HDR_SIZE + xi->rxpi.ip4_header_length + 4], xi->rxpi.tcp_seq);
380 xi->rxpi.tcp_seq += out_remaining;
381 xi->rxpi.tcp_remaining -= out_remaining;
/* Pull payload from the input MDL chain until this segment is full. */
382 do
383 {
384 in_buffer = XenNet_GetData(xi, out_remaining, &length);
385 memcpy(&out_buffer[out_offset], in_buffer, length);
386 out_remaining -= length;
387 out_offset += length;
388 } while (out_remaining != 0 && in_buffer != NULL);
389 //length = xi->rxpi.mss - out_remaining;
390 NdisChainBufferAtBack(packet, out_mdl);
391 NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
392 }
394 KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ " (%p)\n", packet));
395 return packet;
396 }
/* Turn the frame assembled in xi->rxpi into one or more NDIS packets,
   appending them to packets[] and bumping *packet_count_p.  Non-TCP/UDP
   frames and unsplit TCP/UDP frames yield one packet; an oversized GSO
   TCP frame is re-segmented into mss-sized packets, each checksummed. */
398 static VOID
399 XenNet_MakePackets(
400 struct xennet_info *xi,
401 PNDIS_PACKET *packets,
402 PULONG packet_count_p
403 )
404 {
/* NOTE(review): most of these locals (first_packet, curr_packet,
   curr_in_*, curr_out_*, total_in_remaining, buffer_in_remaining) are
   unused — left over from an in-progress rewrite. */
405 PNDIS_PACKET first_packet;
406 PNDIS_PACKET curr_packet;
407 PNDIS_BUFFER mdls[MAX_BUFFERS_PER_PACKET];
408 ULONG mdl_count = 0;
409 ULONG curr_in_mdl_index;
410 PNDIS_BUFFER curr_out_mdl;
411 ULONG curr_in_offset;
412 ULONG curr_out_offset;
413 PUCHAR curr_in_buffer;
414 PUCHAR curr_out_buffer;
415 USHORT i;
417 ULONG total_in_remaining;
418 ULONG buffer_in_remaining;
420 KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "(packets = %p, packet_count = %d)\n", packets, *packet_count_p));
422 XenNet_ParseHeader(xi);
423 switch (xi->rxpi.ip_proto)
424 {
425 case 6: // TCP
/* Oversized GSO TCP frames fall through to the split path below. */
426 if (xi->rxpi.split_required)
427 break;
428 // fallthrough
429 case 17: // UDP
/* Single packet; compute the transport checksum if the backend left it
   blank (NETRXF_csum_blank). */
430 packets[*packet_count_p] = XenNet_MakePacket(xi);
431 if (xi->rxpi.csum_calc_required)
432 XenNet_SumPacket(xi, packets[*packet_count_p]);
433 (*packet_count_p)++;
434 KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ " (TCP/UDP)\n"));
435 return;
436 default:
437 packets[*packet_count_p] = XenNet_MakePacket(xi);
438 (*packet_count_p)++;
439 KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ " (Other)\n"));
440 return;
441 }
/* Split path: emit one packet per mss-sized slice of TCP payload.
   XenNet_MakePacket consumes tcp_remaining on each call. */
442 KdPrint((__DRIVER_NAME " splitting packet\n"));
443 xi->rxpi.tcp_remaining = xi->rxpi.tcp_length;
/* Position the data cursor just past the headers in the first buffer. */
444 if (MmGetMdlByteCount(xi->rxpi.mdls[0]) > XN_HDR_SIZE + xi->rxpi.ip4_header_length + xi->rxpi.tcp_header_length)
445 xi->rxpi.curr_mdl_offset = XN_HDR_SIZE + xi->rxpi.ip4_header_length + xi->rxpi.tcp_header_length;
446 else
447 xi->rxpi.curr_mdl = 1;
449 while (xi->rxpi.tcp_remaining)
450 {
451 KdPrint((__DRIVER_NAME " tcp_remaining = %d\n", xi->rxpi.tcp_remaining));
452 packets[*packet_count_p] = XenNet_MakePacket(xi);
453 XenNet_SumPacket(xi, packets[*packet_count_p]);
454 (*packet_count_p)++;
455 }
456 KdPrint((__DRIVER_NAME " tcp_remaining = %d\n", xi->rxpi.tcp_remaining));
457 // TODO: restore psh status to last packet
/* NOTE(review): mdl_count is never incremented, so this cleanup loop is
   dead code — and the original rxpi.mdls[] fragments are never returned
   to the freelist after splitting.  Looks like work-in-progress (the
   changeset message says as much); confirm before relying on it. */
458 for (i = 0; i < mdl_count; i++)
459 {
460 NdisAdjustBufferLength(mdls[i], PAGE_SIZE);
461 put_page_on_freelist(xi, mdls[i]);
462 }
463 KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ " (split)\n"));
464 }
466 // Called at DISPATCH_LEVEL
467 NDIS_STATUS
468 XenNet_RxBufferCheck(struct xennet_info *xi)
469 {
470 RING_IDX cons, prod;
471 PNDIS_PACKET packets[NET_RX_RING_SIZE];
472 ULONG packet_count;
473 PMDL mdl;
474 int moretodo;
475 struct netif_rx_response *rxrsp = NULL;
476 struct netif_extra_info *ei;
477 NDIS_STATUS status;
478 USHORT id;
479 int cycles = 0;
480 #if defined(XEN_PROFILE)
481 LARGE_INTEGER tsc, tsc2, dummy;
482 #endif
484 KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
486 #if defined(XEN_PROFILE)
487 tsc = tsc2 = KeQueryPerformanceCounter(&dummy);
488 #endif
490 ASSERT(xi->connected);
492 KeAcquireSpinLockAtDpcLevel(&xi->rx_lock);
494 packet_count = 0;
495 do {
496 ASSERT(cycles++ < 256);
497 prod = xi->rx.sring->rsp_prod;
498 KeMemoryBarrier(); /* Ensure we see responses up to 'rp'. */
500 for (cons = xi->rx.rsp_cons; cons != prod; cons++)
501 {
502 ASSERT(cycles++ < 256);
503 id = (USHORT)(cons & (NET_RX_RING_SIZE - 1));
504 mdl = xi->rx_buffers[id];
505 xi->rx_buffers[id] = NULL;
506 xi->rx_id_free++;
507 if (xi->rxpi.extra_info)
508 {
509 put_page_on_freelist(xi, mdl);
510 ei = (struct netif_extra_info *)RING_GET_RESPONSE(&xi->rx, cons);
511 xi->rxpi.extra_info = !!(ei->flags & XEN_NETIF_EXTRA_FLAG_MORE);
512 switch (ei->type)
513 {
514 case XEN_NETIF_EXTRA_TYPE_GSO:
515 switch (ei->u.gso.type)
516 {
517 case XEN_NETIF_GSO_TYPE_TCPV4:
518 xi->rxpi.mss = (PVOID)(xen_ulong_t)(ei->u.gso.size);
519 // TODO - put this assertion somewhere ASSERT(header_len + xi->rxpi.mss <= PAGE_SIZE); // this limits MTU to PAGE_SIZE - XN_HEADER_LEN
520 break;
521 default:
522 KdPrint((__DRIVER_NAME " Unknown GSO type (%d) detected\n", ei->u.gso.type));
523 break;
524 }
525 break;
526 default:
527 KdPrint((__DRIVER_NAME " Unknown extra info type (%d) detected\n", ei->type));
528 break;
529 }
530 }
531 else
532 {
533 rxrsp = RING_GET_RESPONSE(&xi->rx, cons);
534 if (rxrsp->status <= 0
535 || rxrsp->offset + rxrsp->status > PAGE_SIZE)
536 {
537 KdPrint((__DRIVER_NAME ": Error: rxrsp offset %d, size %d\n",
538 rxrsp->offset, rxrsp->status));
539 put_page_on_freelist(xi, mdl);
540 continue;
541 }
542 ASSERT(rxrsp->id == id);
543 if (!xi->rxpi.more_frags) // handling the packet's 1st buffer
544 {
546 if (rxrsp->flags & (NETRXF_csum_blank|NETRXF_data_validated) && xi->config_csum)
547 {
548 //KdPrint((__DRIVER_NAME " RX csum blank = %d, validated = %d\n", !!(rxrsp->flags & NETRXF_csum_blank), !!(rxrsp->flags & NETRXF_data_validated)));
549 if (rxrsp->flags & NETRXF_csum_blank)
550 xi->rxpi.csum_calc_required = TRUE;
551 }
552 }
553 NdisAdjustBufferLength(mdl, rxrsp->status);
554 xi->rxpi.mdls[xi->rxpi.mdl_count++] = mdl;
555 xi->rxpi.extra_info = !!(rxrsp->flags & NETRXF_extra_info);
556 xi->rxpi.more_frags = !!(rxrsp->flags & NETRXF_more_data);
557 xi->rxpi.total_length += rxrsp->status;
558 }
560 /* Packet done, add it to the list */
561 if (!xi->rxpi.more_frags && !xi->rxpi.extra_info)
562 {
563 XenNet_MakePackets(xi, packets, &packet_count);
564 RtlZeroMemory(&xi->rxpi, sizeof(xi->rxpi));
565 }
566 }
567 xi->rx.rsp_cons = prod;
569 RING_FINAL_CHECK_FOR_RESPONSES(&xi->rx, moretodo);
570 } while (moretodo);
572 /* Give netback more buffers */
573 XenNet_RxBufferAlloc(xi);
575 KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);
577 #if defined(XEN_PROFILE)
578 ProfTime_RxBufferCheckTopHalf.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc2.QuadPart;
579 tsc2 = KeQueryPerformanceCounter(&dummy);
580 #endif
582 if (packet_count > 0)
583 {
584 NdisMIndicateReceivePacket(xi->adapter_handle, packets, packet_count);
585 #if defined(XEN_PROFILE)
586 ProfCount_CallsToIndicateReceive++;
587 #endif
588 }
590 KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
592 #if defined(XEN_PROFILE)
593 ProfTime_RxBufferCheck.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
594 ProfTime_RxBufferCheckBotHalf.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc2.QuadPart;
595 ProfCount_RxBufferCheck++;
596 #endif
598 return NDIS_STATUS_SUCCESS;
599 }
601 /* called at DISPATCH_LEVEL */
603 VOID
604 XenNet_ReturnPacket(
605 IN NDIS_HANDLE MiniportAdapterContext,
606 IN PNDIS_PACKET Packet
607 )
608 {
609 struct xennet_info *xi = MiniportAdapterContext;
610 PMDL mdl;
611 int cycles = 0;
612 #if defined(XEN_PROFILE)
613 LARGE_INTEGER tsc, dummy;
614 #endif
616 KdPrint((__DRIVER_NAME " --> " __FUNCTION__ " (%p)\n", Packet));
618 #if defined(XEN_PROFILE)
619 tsc = KeQueryPerformanceCounter(&dummy);
620 #endif
622 KeAcquireSpinLockAtDpcLevel(&xi->rx_lock);
624 NdisUnchainBufferAtBack(Packet, &mdl);
625 while (mdl)
626 {
627 ASSERT(cycles++ < 256);
628 NdisAdjustBufferLength(mdl, PAGE_SIZE);
629 put_page_on_freelist(xi, mdl);
630 NdisUnchainBufferAtBack(Packet, &mdl);
631 }
633 NdisFreePacket(Packet);
635 KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);
637 KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
639 #if defined(XEN_PROFILE)
640 ProfTime_ReturnPacket.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
641 ProfCount_ReturnPacket++;
642 #endif
643 }
645 /*
646 Free all Rx buffers (on halt, for example)
647 The ring must be stopped at this point.
648 */
649 static void
650 XenNet_RxBufferFree(struct xennet_info *xi)
651 {
652 int i;
653 PMDL mdl;
655 ASSERT(!xi->connected);
657 for (i = 0; i < NET_RX_RING_SIZE; i++)
658 {
659 if (!xi->rx_buffers[i])
660 continue;
662 mdl = xi->rx_buffers[i];
663 NdisAdjustBufferLength(mdl, PAGE_SIZE);
664 put_page_on_freelist(xi, mdl);
665 }
666 }
668 BOOLEAN
669 XenNet_RxInit(xennet_info_t *xi)
670 {
671 int i;
673 KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
675 xi->rx_mdl = AllocatePage();
676 xi->rx_pgs = MmGetMdlVirtualAddress(xi->rx_mdl);
677 SHARED_RING_INIT(xi->rx_pgs);
678 FRONT_RING_INIT(&xi->rx, xi->rx_pgs, PAGE_SIZE);
679 xi->rx_ring_ref = xi->XenInterface.GntTbl_GrantAccess(
680 xi->XenInterface.InterfaceHeader.Context, 0,
681 *MmGetMdlPfnArray(xi->rx_mdl), FALSE, 0);
682 xi->rx_id_free = NET_RX_RING_SIZE;
684 for (i = 0; i < NET_RX_RING_SIZE; i++)
685 {
686 xi->rx_buffers[i] = NULL;
687 }
689 XenNet_RxBufferAlloc(xi);
691 KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
693 return TRUE;
694 }
696 BOOLEAN
697 XenNet_RxShutdown(xennet_info_t *xi)
698 {
699 KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
701 XenNet_RxBufferFree(xi);
703 free_page_freelist(xi);
705 /* free RX resources */
706 if (xi->XenInterface.GntTbl_EndAccess(
707 xi->XenInterface.InterfaceHeader.Context, xi->rx_ring_ref, 0))
708 {
709 xi->rx_ring_ref = GRANT_INVALID_REF;
710 FreePages(xi->rx_mdl);
711 }
712 xi->rx_pgs = NULL;
714 KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
716 return TRUE;
717 }