win-pvdrivers

view xennet/xennet_rx.c @ 1048:fba0ce4d9e54

Fix checksum problem on lso for xp/2003. Tidy up.
author James Harper <james.harper@bendigoit.com.au>
date Mon May 13 21:14:35 2013 +1000 (2013-05-13)
parents cb767700f91c
children 2ef536c2d9fe
/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "xennet.h"
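/*
 Note on the receive buffer scheme (summary added for readability, not from
 the original author): two freelists back the rx path. "pb" (page buffer)
 entries are PAGE_SIZE buffers granted to the backend via XnGrantAccess and
 posted on the rx ring; "hb" (header buffer) entries are small local buffers
 that hold a packet's rebuilt Ethernet/IP/TCP header when the packet spans
 multiple pages or must be split for LSO.
*/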
static __inline shared_buffer_t *
get_pb_from_freelist(struct xennet_info *xi)
{
  shared_buffer_t *pb;
  PVOID ptr_ref;

  if (stack_pop(xi->rx_pb_stack, &ptr_ref))
  {
    pb = ptr_ref;
    pb->ref_count = 1;
    InterlockedDecrement(&xi->rx_pb_free);
    return pb;
  }

  /* don't allocate a new one if we are shutting down */
  if (xi->device_state != DEVICE_STATE_INITIALISING && xi->device_state != DEVICE_STATE_ACTIVE)
    return NULL;

  pb = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(shared_buffer_t), XENNET_POOL_TAG, LowPoolPriority);
  if (!pb)
    return NULL;
  pb->virtual = ExAllocatePoolWithTagPriority(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG, LowPoolPriority);
  if (!pb->virtual)
  {
    ExFreePoolWithTag(pb, XENNET_POOL_TAG);
    return NULL;
  }
  pb->mdl = IoAllocateMdl(pb->virtual, PAGE_SIZE, FALSE, FALSE, NULL);
  if (!pb->mdl)
  {
    ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
    ExFreePoolWithTag(pb, XENNET_POOL_TAG);
    return NULL;
  }
  pb->gref = (grant_ref_t)XnGrantAccess(xi->handle,
    (ULONG)(MmGetPhysicalAddress(pb->virtual).QuadPart >> PAGE_SHIFT), FALSE, INVALID_GRANT_REF, (ULONG)'XNRX');
  if (pb->gref == INVALID_GRANT_REF)
  {
    IoFreeMdl(pb->mdl);
    ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
    ExFreePoolWithTag(pb, XENNET_POOL_TAG);
    return NULL;
  }
  MmBuildMdlForNonPagedPool(pb->mdl);
  pb->ref_count = 1;
  return pb;
}
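/*
 pb entries are reference counted (note added for readability): ref_pb takes
 an extra reference when a page is chained into an indicated packet, and
 put_pb_on_freelist only recycles the buffer (or frees it outright once the
 freelist exceeds RX_MAX_PB_FREELIST) when the count drops to zero.
*/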
static __inline VOID
ref_pb(struct xennet_info *xi, shared_buffer_t *pb)
{
  UNREFERENCED_PARAMETER(xi);
  InterlockedIncrement(&pb->ref_count);
}
static __inline VOID
put_pb_on_freelist(struct xennet_info *xi, shared_buffer_t *pb)
{
  if (InterlockedDecrement(&pb->ref_count) == 0)
  {
    //NdisAdjustBufferLength(pb->buffer, PAGE_SIZE);
    //NDIS_BUFFER_LINKAGE(pb->buffer) = NULL;
    if (xi->rx_pb_free > RX_MAX_PB_FREELIST)
    {
      XnEndAccess(xi->handle, pb->gref, FALSE, (ULONG)'XNRX');
      IoFreeMdl(pb->mdl);
      ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
      ExFreePoolWithTag(pb, XENNET_POOL_TAG);
      return;
    }
    pb->mdl->ByteCount = PAGE_SIZE;
    pb->mdl->Next = NULL;
    pb->next = NULL;
    stack_push(xi->rx_pb_stack, pb);
    InterlockedIncrement(&xi->rx_pb_free);
  }
}
static __inline shared_buffer_t *
get_hb_from_freelist(struct xennet_info *xi)
{
  shared_buffer_t *hb;
  PVOID ptr_ref;

  if (stack_pop(xi->rx_hb_stack, &ptr_ref))
  {
    hb = ptr_ref;
    InterlockedDecrement(&xi->rx_hb_free);
    return hb;
  }

  /* don't allocate a new one if we are shutting down */
  if (xi->device_state != DEVICE_STATE_INITIALISING && xi->device_state != DEVICE_STATE_ACTIVE)
    return NULL;

  hb = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(shared_buffer_t) + MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH, XENNET_POOL_TAG, LowPoolPriority);
  if (!hb)
    return NULL;
  NdisZeroMemory(hb, sizeof(shared_buffer_t));
  hb->mdl = IoAllocateMdl(hb + 1, MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH, FALSE, FALSE, NULL);
  if (!hb->mdl) {
    ExFreePoolWithTag(hb, XENNET_POOL_TAG);
    return NULL;
  }
  MmBuildMdlForNonPagedPool(hb->mdl);
  return hb;
}
static __inline VOID
put_hb_on_freelist(struct xennet_info *xi, shared_buffer_t *hb)
{
  XN_ASSERT(xi);
  /* restore ByteCount to the length the MDL was allocated with (the data
     area starting at hb + 1) */
  hb->mdl->ByteCount = MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH;
  hb->mdl->Next = NULL;
  hb->next = NULL;
  stack_push(xi->rx_hb_stack, hb);
  InterlockedIncrement(&xi->rx_hb_free);
}
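/*
 Ring refill (note added for readability): batch_target is the number of
 empty request slots, i.e. rx_target minus the requests already outstanding
 (req_prod_pvt - rsp_cons). The refill is skipped while the ring is still
 more than 3/4 populated, so for example with rx_target = 256 a refill only
 happens once 64 or more slots are free.
*/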
// Called at DISPATCH_LEVEL with rx lock held
static VOID
XenNet_FillRing(struct xennet_info *xi)
{
  unsigned short id;
  shared_buffer_t *page_buf;
  ULONG i, notify;
  ULONG batch_target;
  RING_IDX req_prod = xi->rx_ring.req_prod_pvt;
  netif_rx_request_t *req;

  //FUNCTION_ENTER();

  if (xi->device_state != DEVICE_STATE_ACTIVE)
    return;

  batch_target = xi->rx_target - (req_prod - xi->rx_ring.rsp_cons);

  if (batch_target < (xi->rx_target >> 2)) {
    //FUNCTION_EXIT();
    return; /* only refill if we are less than 3/4 full already */
  }

  for (i = 0; i < batch_target; i++) {
    page_buf = get_pb_from_freelist(xi);
    if (!page_buf) {
      KdPrint((__DRIVER_NAME " Added %d out of %d buffers to rx ring (no free pages)\n", i, batch_target));
      break;
    }
    xi->rx_id_free--;

    /* Give to netback */
    id = (USHORT)((req_prod + i) & (NET_RX_RING_SIZE - 1));
    XN_ASSERT(xi->rx_ring_pbs[id] == NULL);
    xi->rx_ring_pbs[id] = page_buf;
    req = RING_GET_REQUEST(&xi->rx_ring, req_prod + i);
    req->id = id;
    req->gref = page_buf->gref;
    XN_ASSERT(req->gref != INVALID_GRANT_REF);
  }
  KeMemoryBarrier();
  xi->rx_ring.req_prod_pvt = req_prod + i;
  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->rx_ring, notify);
  if (notify) {
    XnNotify(xi->handle, xi->event_channel);
  }

  //FUNCTION_EXIT();

  return;
}
#if NTDDI_VERSION < NTDDI_VISTA
typedef struct {
  PNDIS_PACKET first_packet;
  PNDIS_PACKET last_packet;
  ULONG packet_count;
} rx_context_t;
#else
typedef struct {
  PNET_BUFFER_LIST first_nbl;
  PNET_BUFFER_LIST last_nbl;
  ULONG packet_count;
  ULONG nbl_count;
} rx_context_t;
#endif
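/*
 rx_context_t (note added for readability) accumulates the chain of packets
 (NDIS5) or NET_BUFFER_LISTs (NDIS6) built during one XenNet_RxBufferCheck
 pass, so they can be indicated to NDIS in batches rather than one call per
 packet.
*/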
#if NTDDI_VERSION < NTDDI_VISTA
/*
NDIS5 appears to insist that the checksum on received packets is correct, and won't
believe us when we lie about it, which happens when the packet is generated on the
same bridge in Dom0. Doh!
This is only for TCP and UDP packets. IP checksums appear to be correct anyway.
*/
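/*
 Illustrative note (added; not part of the driver): the loop below computes
 the standard RFC 1071 one's-complement sum over the IPv4 pseudo-header
 (source/destination address, protocol, TCP/UDP length) plus the TCP/UDP
 segment, then folds the carries and complements. The fold at the end is
 equivalent to this sketch:

   static USHORT fold_csum(ULONG csum) {
     while (csum & 0xFFFF0000)
       csum = (csum & 0xFFFF) + (csum >> 16);
     return (USHORT)csum; // caller applies ~ and network byte order
   }
*/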
static BOOLEAN
XenNet_SumPacketData(
  packet_info_t *pi,
  PNDIS_PACKET packet,
  BOOLEAN set_csum) {
  USHORT i;
  PUCHAR buffer;
  PMDL mdl;
  UINT total_length;
  UINT data_length;
  UINT buffer_length;
  USHORT buffer_offset;
  ULONG csum;
  PUSHORT csum_ptr;
  USHORT remaining;
  USHORT ip4_length;
  BOOLEAN csum_span = TRUE; /* when the USHORT to be checksummed spans a buffer */

  NdisGetFirstBufferFromPacketSafe(packet, &mdl, &buffer, &buffer_length, &total_length, NormalPagePriority);
  if (!buffer) {
    FUNCTION_MSG("NdisGetFirstBufferFromPacketSafe failed, buffer == NULL\n");
    return FALSE;
  }
  XN_ASSERT(mdl);

  ip4_length = GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 2]);
  data_length = ip4_length + XN_HDR_SIZE;

  if ((USHORT)data_length > total_length) {
    FUNCTION_MSG("Size Mismatch %d (ip4_length + XN_HDR_SIZE) != %d (total_length)\n", ip4_length + XN_HDR_SIZE, total_length);
    return FALSE;
  }

  switch (pi->ip_proto) {
  case 6:
    XN_ASSERT(buffer_length >= (USHORT)(XN_HDR_SIZE + pi->ip4_header_length + 17));
    csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + pi->ip4_header_length + 16];
    break;
  case 17:
    XN_ASSERT(buffer_length >= (USHORT)(XN_HDR_SIZE + pi->ip4_header_length + 7));
    csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + pi->ip4_header_length + 6];
    break;
  default:
    FUNCTION_MSG("Don't know how to calc sum for IP Proto %d\n", pi->ip_proto);
    //FUNCTION_EXIT();
    return FALSE; // should never happen
  }

  if (set_csum)
    *csum_ptr = 0;

  csum = 0;
  csum += GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 12]) + GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 14]); // src
  csum += GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 16]) + GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 18]); // dst
  csum += ((USHORT)buffer[XN_HDR_SIZE + 9]);

  remaining = ip4_length - pi->ip4_header_length;

  csum += remaining;

  csum_span = FALSE;
  buffer_offset = i = XN_HDR_SIZE + pi->ip4_header_length;
  while (i < data_length) {
    /* don't include the checksum field itself in the calculation */
    if ((pi->ip_proto == 6 && i == XN_HDR_SIZE + pi->ip4_header_length + 16) || (pi->ip_proto == 17 && i == XN_HDR_SIZE + pi->ip4_header_length + 6)) {
      /* we know that this always happens in the header buffer so we are guaranteed the full two bytes */
      i += 2;
      buffer_offset += 2;
      continue;
    }
    if (csum_span) {
      /* the other half of the next bit */
      XN_ASSERT(buffer_offset == 0);
      csum += (USHORT)buffer[buffer_offset];
      csum_span = FALSE;
      i += 1;
      buffer_offset += 1;
    } else if (buffer_offset == buffer_length - 1) {
      /* deal with a buffer ending on an odd byte boundary */
      csum += (USHORT)buffer[buffer_offset] << 8;
      csum_span = TRUE;
      i += 1;
      buffer_offset += 1;
    } else {
      csum += GET_NET_PUSHORT(&buffer[buffer_offset]);
      i += 2;
      buffer_offset += 2;
    }
    if (buffer_offset == buffer_length && i < total_length) {
      NdisGetNextBuffer(mdl, &mdl);
      if (mdl == NULL) {
        KdPrint((__DRIVER_NAME " Ran out of buffers\n"));
        return FALSE; // should never happen
      }
      NdisQueryBufferSafe(mdl, &buffer, &buffer_length, NormalPagePriority);
      XN_ASSERT(buffer_length);
      buffer_offset = 0;
    }
  }

  while (csum & 0xFFFF0000)
    csum = (csum & 0xFFFF) + (csum >> 16);

  if (set_csum) {
    *csum_ptr = (USHORT)~GET_NET_USHORT((USHORT)csum);
  } else {
    return (BOOLEAN)(*csum_ptr == (USHORT)~GET_NET_USHORT((USHORT)csum));
  }
  return TRUE;
}
#endif
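/*
 XenNet_MakePacket (note added for readability): builds one NDIS packet /
 NET_BUFFER from the parsed buffers in pi. Two shapes are possible: a packet
 that fits in a single page buffer is passed through unchanged, while
 anything spanning pages (or requiring an LSO split) gets a header buffer
 rebuilt via XenNet_BuildHeader followed by partial MDLs over the payload
 pages. For a split, the IPv4 total length and TCP sequence number in the
 header copy are rewritten for each mss-sized segment.
*/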
static BOOLEAN
XenNet_MakePacket(struct xennet_info *xi, rx_context_t *rc, packet_info_t *pi) {
#if NTDDI_VERSION < NTDDI_VISTA
  NDIS_STATUS status;
  PNDIS_PACKET packet;
#else
  PNET_BUFFER_LIST nbl;
  PNET_BUFFER packet;
#endif
  PMDL mdl_head, mdl_tail, curr_mdl;
  PUCHAR header_va;
  ULONG out_remaining;
  ULONG header_extra;
  shared_buffer_t *header_buf;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  UINT packet_length;
#else
  NDIS_TCP_IP_CHECKSUM_NET_BUFFER_LIST_INFO csum_info;
#endif
  //FUNCTION_ENTER();

#if NTDDI_VERSION < NTDDI_VISTA
  NdisAllocatePacket(&status, &packet, xi->rx_packet_pool);
  if (status != NDIS_STATUS_SUCCESS) {
    FUNCTION_MSG("No free packets\n");
    return FALSE;
  }
  NdisZeroMemory(packet->MiniportReservedEx, sizeof(packet->MiniportReservedEx));
  NDIS_SET_PACKET_HEADER_SIZE(packet, XN_HDR_SIZE);
#else
  nbl = NdisAllocateNetBufferList(xi->rx_nbl_pool, 0, 0);
  if (!nbl) {
    /* buffers will be freed in MakePackets */
    KdPrint((__DRIVER_NAME " No free nbls\n"));
    //FUNCTION_EXIT();
    return FALSE;
  }

  packet = NdisAllocateNetBuffer(xi->rx_packet_pool, NULL, 0, 0);
  if (!packet) {
    KdPrint((__DRIVER_NAME " No free packets\n"));
    NdisFreeNetBufferList(nbl);
    //FUNCTION_EXIT();
    return FALSE;
  }
#endif

  if (!pi->first_mdl->Next && !pi->split_required) {
    /* a single buffer <= MTU */
    header_buf = NULL;
    XenNet_BuildHeader(pi, pi->first_mdl_virtual, pi->first_mdl_length);
#if NTDDI_VERSION < NTDDI_VISTA
    NdisChainBufferAtBack(packet, pi->first_mdl);
    PACKET_FIRST_PB(packet) = pi->first_pb;
#else
    NET_BUFFER_FIRST_MDL(packet) = pi->first_mdl;
    NET_BUFFER_CURRENT_MDL(packet) = pi->first_mdl;
    NET_BUFFER_CURRENT_MDL_OFFSET(packet) = 0;
    NET_BUFFER_DATA_OFFSET(packet) = 0;
    NET_BUFFER_DATA_LENGTH(packet) = pi->total_length;
    NB_FIRST_PB(packet) = pi->first_pb;
#endif
    ref_pb(xi, pi->first_pb);
  } else {
    XN_ASSERT(ndis_os_minor_version >= 1);
    header_buf = get_hb_from_freelist(xi);
    if (!header_buf) {
      FUNCTION_MSG("No free header buffers\n");
#if NTDDI_VERSION < NTDDI_VISTA
      NdisUnchainBufferAtFront(packet, &curr_mdl);
      NdisFreePacket(packet);
#else
      NdisFreeNetBufferList(nbl);
      NdisFreeNetBuffer(packet);
#endif
      return FALSE;
    }
    header_va = (PUCHAR)(header_buf + 1);
    NdisMoveMemory(header_va, pi->header, pi->header_length);
    //if (pi->ip_proto == 50) {
    //  FUNCTION_MSG("header_length = %d, current_lookahead = %d\n", pi->header_length, xi->current_lookahead);
    //  FUNCTION_MSG("ip4_header_length = %d\n", pi->ip4_header_length);
    //  FUNCTION_MSG("tcp_header_length = %d\n", pi->tcp_header_length);
    //}
    /* make sure only the header is in the first buffer (or the entire packet, but that is done in the above case) */
    XenNet_BuildHeader(pi, header_va, MAX_ETH_HEADER_LENGTH + pi->ip4_header_length + pi->tcp_header_length);
    header_extra = pi->header_length - (MAX_ETH_HEADER_LENGTH + pi->ip4_header_length + pi->tcp_header_length);
    XN_ASSERT(pi->header_length <= MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH);
    header_buf->mdl->ByteCount = pi->header_length;
    mdl_head = mdl_tail = curr_mdl = header_buf->mdl;
#if NTDDI_VERSION < NTDDI_VISTA
    PACKET_FIRST_PB(packet) = header_buf;
    header_buf->next = pi->curr_pb;
    NdisChainBufferAtBack(packet, mdl_head);
#else
    NB_FIRST_PB(packet) = header_buf;
    header_buf->next = pi->curr_pb;
    NET_BUFFER_FIRST_MDL(packet) = mdl_head;
    NET_BUFFER_CURRENT_MDL(packet) = mdl_head;
    NET_BUFFER_CURRENT_MDL_OFFSET(packet) = 0;
    NET_BUFFER_DATA_OFFSET(packet) = 0;
    NET_BUFFER_DATA_LENGTH(packet) = pi->header_length;
#endif

    if (pi->split_required) {
      /* must be ip4 */
      ULONG tcp_length;
      USHORT new_ip4_length;
      tcp_length = (USHORT)min(pi->mss, pi->tcp_remaining);
      new_ip4_length = (USHORT)(pi->ip4_header_length + pi->tcp_header_length + tcp_length);
      SET_NET_USHORT(&header_va[XN_HDR_SIZE + 2], new_ip4_length);
      SET_NET_ULONG(&header_va[XN_HDR_SIZE + pi->ip4_header_length + 4], pi->tcp_seq);
      pi->tcp_seq += tcp_length;
      pi->tcp_remaining = (USHORT)(pi->tcp_remaining - tcp_length);
      /* part of the packet is already present in the header buffer for lookahead */
      out_remaining = tcp_length - header_extra;
      XN_ASSERT((LONG)out_remaining >= 0);
    } else {
      out_remaining = pi->total_length - pi->header_length;
      XN_ASSERT((LONG)out_remaining >= 0);
    }

    while (out_remaining != 0) {
      //ULONG in_buffer_offset;
      ULONG in_buffer_length;
      ULONG out_length;

      //if (pi->ip_proto == 50) {
      //  FUNCTION_MSG("in loop - out_remaining = %d, curr_buffer = %p, curr_pb = %p\n", out_remaining, pi->curr_mdl, pi->curr_pb);
      //}
      if (!pi->curr_mdl || !pi->curr_pb) {
        KdPrint((__DRIVER_NAME " out of buffers for packet\n"));
        //KdPrint((__DRIVER_NAME " out_remaining = %d, curr_buffer = %p, curr_pb = %p\n", out_remaining, pi->curr_mdl, pi->curr_pb));
        // TODO: free some stuff or we'll leak
        /* unchain buffers then free packet */
        //FUNCTION_EXIT();
        return FALSE;
      }

      in_buffer_length = MmGetMdlByteCount(pi->curr_mdl);
      out_length = min(out_remaining, in_buffer_length - pi->curr_mdl_offset);
      curr_mdl = IoAllocateMdl((PUCHAR)MmGetMdlVirtualAddress(pi->curr_mdl) + pi->curr_mdl_offset, out_length, FALSE, FALSE, NULL);
      XN_ASSERT(curr_mdl);
      IoBuildPartialMdl(pi->curr_mdl, curr_mdl, (PUCHAR)MmGetMdlVirtualAddress(pi->curr_mdl) + pi->curr_mdl_offset, out_length);
      mdl_tail->Next = curr_mdl;
      mdl_tail = curr_mdl;
      curr_mdl->Next = NULL; /* I think this might be redundant */
#if NTDDI_VERSION < NTDDI_VISTA
#else
      NET_BUFFER_DATA_LENGTH(packet) += out_length;
#endif
      ref_pb(xi, pi->curr_pb);
      pi->curr_mdl_offset = (USHORT)(pi->curr_mdl_offset + out_length);
      if (pi->curr_mdl_offset == in_buffer_length) {
        pi->curr_mdl = pi->curr_mdl->Next;
        pi->curr_pb = pi->curr_pb->next;
        pi->curr_mdl_offset = 0;
      }
      out_remaining -= out_length;
    }
#if NTDDI_VERSION < NTDDI_VISTA
    if (pi->split_required) {
      // TODO: only if IP checksum is disabled...
      XenNet_SumIpHeader(header_va, pi->ip4_header_length);
    }
#endif
    if (header_extra > 0)
      pi->header_length -= header_extra;
  }

  rc->packet_count++;
#if NTDDI_VERSION < NTDDI_VISTA
#else
  NET_BUFFER_LIST_FIRST_NB(nbl) = packet;
#endif

  if (pi->parse_result == PARSE_OK) {
#if NTDDI_VERSION < NTDDI_VISTA
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    XN_ASSERT(csum_info->Value == 0);
    if (pi->csum_blank || pi->data_validated || pi->split_required) {
      BOOLEAN checksum_offload = FALSE;
      /* we know this is IPv4, and we know Linux always validates the IPv4 checksum for us */
      if (xi->setting_csum.V4Receive.IpChecksum) {
        if (!pi->ip_has_options || xi->setting_csum.V4Receive.IpOptionsSupported) {
          if (XenNet_CheckIpHeaderSum(pi->header, pi->ip4_header_length))
            csum_info->Receive.NdisPacketIpChecksumSucceeded = TRUE;
          else
            csum_info->Receive.NdisPacketIpChecksumFailed = TRUE;
        }
      }
      if (xi->setting_csum.V4Receive.TcpChecksum && pi->ip_proto == 6) {
        if (!pi->tcp_has_options || xi->setting_csum.V4Receive.TcpOptionsSupported) {
          csum_info->Receive.NdisPacketTcpChecksumSucceeded = TRUE;
          checksum_offload = TRUE;
        }
      } else if (xi->setting_csum.V4Receive.UdpChecksum && pi->ip_proto == 17) {
        csum_info->Receive.NdisPacketUdpChecksumSucceeded = TRUE;
        checksum_offload = TRUE;
      }
      if (pi->csum_blank && (!xi->config_csum_rx_dont_fix || !checksum_offload)) {
        XenNet_SumPacketData(pi, packet, TRUE);
      }
    } else if (xi->config_csum_rx_check && pi->ip_version == 4) {
      if (xi->setting_csum.V4Receive.IpChecksum) {
        if (!pi->ip_has_options || xi->setting_csum.V4Receive.IpOptionsSupported) {
          if (XenNet_CheckIpHeaderSum(pi->header, pi->ip4_header_length))
            csum_info->Receive.NdisPacketIpChecksumSucceeded = TRUE;
          else
            csum_info->Receive.NdisPacketIpChecksumFailed = TRUE;
        }
      }
      if (xi->setting_csum.V4Receive.TcpChecksum && pi->ip_proto == 6) {
        if (!pi->tcp_has_options || xi->setting_csum.V4Receive.TcpOptionsSupported) {
          if (XenNet_SumPacketData(pi, packet, FALSE)) {
            csum_info->Receive.NdisPacketTcpChecksumSucceeded = TRUE;
          } else {
            csum_info->Receive.NdisPacketTcpChecksumFailed = TRUE;
          }
        }
      } else if (xi->setting_csum.V4Receive.UdpChecksum && pi->ip_proto == 17) {
        if (XenNet_SumPacketData(pi, packet, FALSE)) {
          csum_info->Receive.NdisPacketUdpChecksumSucceeded = TRUE;
        } else {
          csum_info->Receive.NdisPacketUdpChecksumFailed = TRUE;
        }
      }
    }
#else
    csum_info.Value = 0;
    if (pi->csum_blank || pi->data_validated || pi->mss) {
      if (pi->ip_proto == 6) {
        csum_info.Receive.IpChecksumSucceeded = TRUE;
        csum_info.Receive.TcpChecksumSucceeded = TRUE;
      } else if (pi->ip_proto == 17) {
        csum_info.Receive.IpChecksumSucceeded = TRUE;
        csum_info.Receive.UdpChecksumSucceeded = TRUE;
      }
    }
    NET_BUFFER_LIST_INFO(nbl, TcpIpChecksumNetBufferListInfo) = csum_info.Value;
#endif
  }

#if NTDDI_VERSION < NTDDI_VISTA
  if (!rc->first_packet) {
    rc->first_packet = packet;
  } else {
    PACKET_NEXT_PACKET(rc->last_packet) = packet;
  }
  rc->last_packet = packet;
  rc->packet_count++;
#else
  if (!rc->first_nbl) {
    rc->first_nbl = nbl;
  } else {
    NET_BUFFER_LIST_NEXT_NBL(rc->last_nbl) = nbl;
  }
  rc->last_nbl = nbl;
  NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL;
  rc->nbl_count++;
  if (pi->is_multicast) {
    /* multicast */
    xi->stats.ifHCInMulticastPkts++;
    xi->stats.ifHCInMulticastOctets += NET_BUFFER_DATA_LENGTH(packet);
  } else if (pi->is_broadcast) {
    /* broadcast */
    xi->stats.ifHCInBroadcastPkts++;
    xi->stats.ifHCInBroadcastOctets += NET_BUFFER_DATA_LENGTH(packet);
  } else {
    /* unicast */
    xi->stats.ifHCInUcastPkts++;
    xi->stats.ifHCInUcastOctets += NET_BUFFER_DATA_LENGTH(packet);
  }
#endif

#if NTDDI_VERSION < NTDDI_VISTA
  /* windows gets lazy about ack packets and holds on to them forever under high load situations. we don't like this */
  NdisQueryPacketLength(packet, &packet_length);
  if (pi->parse_result != PARSE_OK || (pi->ip_proto == 6 && packet_length <= NDIS_STATUS_RESOURCES_MAX_LENGTH))
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_RESOURCES);
  else
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
#endif
  //FUNCTION_EXIT();

  InterlockedIncrement(&xi->rx_outstanding);
  //FUNCTION_EXIT();
  return TRUE;
}
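/*
 XenNet_MakePackets (note added for readability): parses the header, applies
 the rx filter, then either emits a single packet or, for a large receive
 (split_required), loops emitting one mss-sized segment per XenNet_MakePacket
 call. When the original packet had TCP PSH set, the flag is cleared on all
 but the final segment so only the last one pushes.
*/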
static VOID
XenNet_MakePackets(struct xennet_info *xi, rx_context_t *rc, packet_info_t *pi)
{
  UCHAR psh;
  shared_buffer_t *page_buf;

  XenNet_ParsePacketHeader(pi, NULL, XN_HDR_SIZE + xi->current_lookahead);

  if (!XenNet_FilterAcceptPacket(xi, pi)) {
    goto done;
  }

  if (pi->split_required) {
#if NTDDI_VERSION < NTDDI_VISTA
    /* need to split to mss for NDIS5 */
#else
    switch (xi->current_gso_rx_split_type) {
    case RX_LSO_SPLIT_HALF:
      pi->mss = max((pi->tcp_length + 1) / 2, pi->mss);
      break;
    case RX_LSO_SPLIT_NONE:
      pi->mss = 65535;
      break;
    }
#endif
  }

  switch (pi->ip_proto) {
  case 6: // TCP
    if (pi->split_required)
      break;
    /* fall through */
  case 17: // UDP
    if (!XenNet_MakePacket(xi, rc, pi)) {
      FUNCTION_MSG("Failed to make packet\n");
#if NTDDI_VERSION < NTDDI_VISTA
      xi->stat_rx_no_buffer++;
#else
      xi->stats.ifInDiscards++;
#endif
      goto done;
    }
    goto done;
  default:
    if (!XenNet_MakePacket(xi, rc, pi)) {
      FUNCTION_MSG("Failed to make packet\n");
#if NTDDI_VERSION < NTDDI_VISTA
      xi->stat_rx_no_buffer++;
#else
      xi->stats.ifInDiscards++;
#endif
      goto done;
    }
    goto done;
  }

  /* this is the split_required code */
  pi->tcp_remaining = pi->tcp_length;

  /* we can make certain assumptions here as the following code is only for tcp4 */
  psh = pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] & 8;
  while (pi->tcp_remaining) {
    if (!XenNet_MakePacket(xi, rc, pi)) {
      FUNCTION_MSG("Failed to make packet\n");
#if NTDDI_VERSION < NTDDI_VISTA
      xi->stat_rx_no_buffer++;
#else
      xi->stats.ifInDiscards++;
#endif
      break; /* we are out of memory - just drop the packets */
    }
    if (psh) {
      if (pi->tcp_remaining)
        pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] &= ~8;
      else
        pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] |= 8;
    }
  }
done:
  page_buf = pi->first_pb;
  while (page_buf) {
    shared_buffer_t *next_pb = page_buf->next;
    put_pb_on_freelist(xi, page_buf); /* this doesn't actually free the page_buf if there are outstanding references */
    page_buf = next_pb;
  }
  XenNet_ClearPacketInfo(pi);
  //FUNCTION_EXIT();
  return;
}
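/*
 Return path (note added for readability): when NDIS hands buffers back, each
 MDL is matched against its shared_buffer_t. Header buffers are recognised by
 a NULL 'virtual' field (their data lives immediately after the struct) and
 go back on the hb freelist; page buffers drop a reference, and any partial
 MDL created during packet assembly is freed here.
*/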
#if NTDDI_VERSION < NTDDI_VISTA
/* called at DISPATCH_LEVEL */
/* it's okay for return packet to be called while resume_state != RUNNING as the packet will simply be added back to the freelist, the grants will be fixed later */
VOID
XenNet_ReturnPacket(NDIS_HANDLE adapter_context, PNDIS_PACKET packet) {
  struct xennet_info *xi = adapter_context;
  PNDIS_BUFFER buffer;
  shared_buffer_t *page_buf = PACKET_FIRST_PB(packet);

  //FUNCTION_ENTER();

  NdisUnchainBufferAtFront(packet, &buffer);

  while (buffer) {
    shared_buffer_t *next_buf;
    XN_ASSERT(page_buf);
    next_buf = page_buf->next;
    if (!page_buf->virtual) {
      /* this is a hb not a pb because virtual is NULL (virtual is just the memory after the hb) */
      put_hb_on_freelist(xi, (shared_buffer_t *)MmGetMdlVirtualAddress(buffer) - 1);
    } else {
      if (buffer != page_buf->mdl)
        NdisFreeBuffer(buffer);
      put_pb_on_freelist(xi, page_buf);
    }
    NdisUnchainBufferAtFront(packet, &buffer);
    page_buf = next_buf;
  }

  NdisFreePacket(packet);
  InterlockedDecrement(&xi->rx_outstanding);
  if (!xi->rx_outstanding && xi->device_state != DEVICE_STATE_ACTIVE)
    KeSetEvent(&xi->rx_idle_event, IO_NO_INCREMENT, FALSE);
  //FUNCTION_EXIT();
}
#else
/* called at <= DISPATCH_LEVEL */
/* it's okay for return packet to be called while resume_state != RUNNING as the packet will simply be added back to the freelist, the grants will be fixed later */
VOID
XenNet_ReturnNetBufferLists(NDIS_HANDLE adapter_context, PNET_BUFFER_LIST curr_nbl, ULONG return_flags)
{
  struct xennet_info *xi = adapter_context;
  UNREFERENCED_PARAMETER(return_flags);

  //FUNCTION_ENTER();

  //KdPrint((__DRIVER_NAME " page_buf = %p\n", page_buf));

  XN_ASSERT(xi);
  while (curr_nbl)
  {
    PNET_BUFFER_LIST next_nbl;
    PNET_BUFFER curr_nb;

    next_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl);
    curr_nb = NET_BUFFER_LIST_FIRST_NB(curr_nbl);
    while (curr_nb)
    {
      PNET_BUFFER next_nb;
      PMDL curr_mdl;
      shared_buffer_t *page_buf;

      next_nb = NET_BUFFER_NEXT_NB(curr_nb);
      curr_mdl = NET_BUFFER_FIRST_MDL(curr_nb);
      page_buf = NB_FIRST_PB(curr_nb);
      while (curr_mdl)
      {
        shared_buffer_t *next_buf;
        PMDL next_mdl;

        XN_ASSERT(page_buf); /* make sure that there is a pb to match this mdl */
        next_mdl = curr_mdl->Next;
        next_buf = page_buf->next;
        if (!page_buf->virtual)
        {
          /* this is a hb not a pb because virtual is NULL (virtual is just the memory after the hb) */
          put_hb_on_freelist(xi, (shared_buffer_t *)MmGetMdlVirtualAddress(curr_mdl) - 1);
        }
        else
        {
          //KdPrint((__DRIVER_NAME " returning page_buf %p with id %d\n", page_buf, page_buf->id));
          if (curr_mdl != page_buf->mdl)
          {
            //KdPrint((__DRIVER_NAME " curr_mdl = %p, page_buf->mdl = %p\n", curr_mdl, page_buf->mdl));
            IoFreeMdl(curr_mdl);
          }
          put_pb_on_freelist(xi, page_buf);
        }
        curr_mdl = next_mdl;
        page_buf = next_buf;
      }

      NdisFreeNetBuffer(curr_nb);
      InterlockedDecrement(&xi->rx_outstanding);

      curr_nb = next_nb;
    }
    NdisFreeNetBufferList(curr_nbl);
    curr_nbl = next_nbl;
  }

  if (!xi->rx_outstanding && xi->device_state != DEVICE_STATE_ACTIVE)
    KeSetEvent(&xi->rx_idle_event, IO_NO_INCREMENT, FALSE);

  //FUNCTION_EXIT();
}
#endif
/* We limit the number of packets per interrupt so that acks get a chance
under high rx load. The DPC is immediately re-scheduled */
#define MAXIMUM_PACKETS_PER_INDICATE 32

#define MAXIMUM_PACKETS_PER_INTERRUPT 2560 /* this is calculated before large packet split */
#define MAXIMUM_DATA_PER_INTERRUPT (MAXIMUM_PACKETS_PER_INTERRUPT * 1500) /* help account for large packets */
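/*
 Worked example (added; assumes standard 1500-byte frames): the data cap is
 2560 * 1500 = 3,840,000 bytes per DPC pass, so a burst of large (LSO)
 packets hits the byte limit well before the packet-count limit, and the DPC
 re-queues itself via KeInsertQueueDpc to continue draining the ring.
*/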
// Called at DISPATCH_LEVEL
BOOLEAN
XenNet_RxBufferCheck(struct xennet_info *xi)
{
  RING_IDX cons, prod;
  ULONG packet_count = 0;
  ULONG packet_data = 0;
  ULONG buffer_count = 0;
  USHORT id;
  int more_to_do = FALSE;
  shared_buffer_t *page_buf;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packets[MAXIMUM_PACKETS_PER_INDICATE];
  PNDIS_PACKET first_header_only_packet;
  PNDIS_PACKET last_header_only_packet;
#else
#endif
  //ULONG nbl_count = 0;
  ULONG interim_packet_data = 0;
  struct netif_extra_info *ei;
  rx_context_t rc;
  packet_info_t *pi = &xi->rxpi[KeGetCurrentProcessorNumber() & 0xff];
  shared_buffer_t *head_buf = NULL;
  shared_buffer_t *tail_buf = NULL;
  shared_buffer_t *last_buf = NULL;
  BOOLEAN extra_info_flag = FALSE;
  BOOLEAN more_data_flag = FALSE;
  BOOLEAN dont_set_event;
  //FUNCTION_ENTER();

#if NTDDI_VERSION < NTDDI_VISTA
  rc.first_packet = NULL;
  rc.last_packet = NULL;
  rc.packet_count = 0;
#else
  rc.first_nbl = NULL;
  rc.last_nbl = NULL;
  rc.packet_count = 0;
  rc.nbl_count = 0;
#endif

  /* get all the buffers off the ring as quickly as possible so the lock is held for a minimum amount of time */
  KeAcquireSpinLockAtDpcLevel(&xi->rx_lock);

  if (xi->device_state != DEVICE_STATE_ACTIVE) {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);
    return FALSE;
  }

  if (xi->rx_partial_buf) {
    head_buf = xi->rx_partial_buf;
    tail_buf = xi->rx_partial_buf;
    while (tail_buf->next)
      tail_buf = tail_buf->next;
    more_data_flag = xi->rx_partial_more_data_flag;
    extra_info_flag = xi->rx_partial_extra_info_flag;
    xi->rx_partial_buf = NULL;
  }

  do {
    prod = xi->rx_ring.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'prod'. */

    for (cons = xi->rx_ring.rsp_cons; cons != prod && packet_count < MAXIMUM_PACKETS_PER_INTERRUPT && packet_data < MAXIMUM_DATA_PER_INTERRUPT; cons++) {
      id = (USHORT)(cons & (NET_RX_RING_SIZE - 1));
      page_buf = xi->rx_ring_pbs[id];
      XN_ASSERT(page_buf);
      xi->rx_ring_pbs[id] = NULL;
      xi->rx_id_free++;
      memcpy(&page_buf->rsp, RING_GET_RESPONSE(&xi->rx_ring, cons), max(sizeof(struct netif_rx_response), sizeof(struct netif_extra_info)));
      if (!extra_info_flag) {
        if (page_buf->rsp.status <= 0 || page_buf->rsp.offset + page_buf->rsp.status > PAGE_SIZE) {
          KdPrint((__DRIVER_NAME " Error: rsp offset %d, size %d\n",
            page_buf->rsp.offset, page_buf->rsp.status));
          XN_ASSERT(!extra_info_flag);
          put_pb_on_freelist(xi, page_buf);
          continue;
        }
      }

      if (!head_buf) {
        head_buf = page_buf;
        tail_buf = page_buf;
      } else {
        tail_buf->next = page_buf;
        tail_buf = page_buf;
      }
      page_buf->next = NULL;

      if (extra_info_flag) {
        ei = (struct netif_extra_info *)&page_buf->rsp;
        extra_info_flag = ei->flags & XEN_NETIF_EXTRA_FLAG_MORE;
      } else {
        more_data_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_more_data);
        extra_info_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_extra_info);
        interim_packet_data += page_buf->rsp.status;
      }

      if (!extra_info_flag && !more_data_flag) {
        last_buf = page_buf;
        packet_count++;
        packet_data += interim_packet_data;
        interim_packet_data = 0;
      }
      buffer_count++;
    }
    xi->rx_ring.rsp_cons = cons;

    /* Give netback more buffers */
    XenNet_FillRing(xi);

    if (packet_count >= MAXIMUM_PACKETS_PER_INTERRUPT || packet_data >= MAXIMUM_DATA_PER_INTERRUPT)
      break;

    more_to_do = RING_HAS_UNCONSUMED_RESPONSES(&xi->rx_ring);
    if (!more_to_do) {
      xi->rx_ring.sring->rsp_event = xi->rx_ring.rsp_cons + 1;
      KeMemoryBarrier();
      more_to_do = RING_HAS_UNCONSUMED_RESPONSES(&xi->rx_ring);
    }
  } while (more_to_do);

  /* anything past last_buf belongs to an incomplete packet... */
  if (last_buf && last_buf->next)
  {
    KdPrint((__DRIVER_NAME " Partial receive\n"));
    xi->rx_partial_buf = last_buf->next;
    xi->rx_partial_more_data_flag = more_data_flag;
    xi->rx_partial_extra_info_flag = extra_info_flag;
    last_buf->next = NULL;
  }

  KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);

  if (packet_count >= MAXIMUM_PACKETS_PER_INTERRUPT || packet_data >= MAXIMUM_DATA_PER_INTERRUPT)
  {
    /* fire again immediately */
    KdPrint((__DRIVER_NAME " Dpc Duration Exceeded\n"));
    /* we want the Dpc on the end of the queue. By definition we are already on the right CPU so we know the Dpc queue will be run immediately */
    // KeSetImportanceDpc(&xi->rxtx_dpc, MediumImportance);
    KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
    /* dont set an event in TX path */
    dont_set_event = TRUE;
  }
  else
  {
    /* make sure the Dpc queue is run immediately next interrupt */
    // KeSetImportanceDpc(&xi->rxtx_dpc, HighImportance);
    /* set an event in TX path */
    dont_set_event = FALSE;
  }

  /* make packets out of the buffers */
  page_buf = head_buf;
  extra_info_flag = FALSE;
  more_data_flag = FALSE;
  while (page_buf) {
    shared_buffer_t *next_buf = page_buf->next;
    PMDL mdl;

    page_buf->next = NULL;
    if (extra_info_flag) {
      //KdPrint((__DRIVER_NAME " processing extra info\n"));
      ei = (struct netif_extra_info *)&page_buf->rsp;
      extra_info_flag = ei->flags & XEN_NETIF_EXTRA_FLAG_MORE;
      switch (ei->type)
      {
      case XEN_NETIF_EXTRA_TYPE_GSO:
        switch (ei->u.gso.type) {
        case XEN_NETIF_GSO_TYPE_TCPV4:
          pi->mss = ei->u.gso.size;
          // TODO - put this assertion somewhere XN_ASSERT(header_len + pi->mss <= PAGE_SIZE); // this limits MTU to PAGE_SIZE - XN_HEADER_LEN
          break;
        default:
          KdPrint((__DRIVER_NAME " Unknown GSO type (%d) detected\n", ei->u.gso.type));
          break;
        }
        break;
      default:
        KdPrint((__DRIVER_NAME " Unknown extra info type (%d) detected\n", ei->type));
        break;
      }
      put_pb_on_freelist(xi, page_buf);
    } else {
      XN_ASSERT(!page_buf->rsp.offset);
      if (!more_data_flag) { // handling the packet's 1st buffer
        if (page_buf->rsp.flags & NETRXF_csum_blank)
          pi->csum_blank = TRUE;
        if (page_buf->rsp.flags & NETRXF_data_validated)
          pi->data_validated = TRUE;
      }
      mdl = page_buf->mdl;
      mdl->ByteCount = page_buf->rsp.status; //NdisAdjustBufferLength(mdl, page_buf->rsp.status);
      //KdPrint((__DRIVER_NAME " buffer = %p, pb = %p\n", buffer, page_buf));
      if (pi->first_pb) {
        XN_ASSERT(pi->curr_pb);
        //KdPrint((__DRIVER_NAME " additional buffer\n"));
        pi->curr_pb->next = page_buf;
        pi->curr_pb = page_buf;
        XN_ASSERT(pi->curr_mdl);
        pi->curr_mdl->Next = mdl;
        pi->curr_mdl = mdl;
      } else {
        pi->first_pb = page_buf;
        pi->curr_pb = page_buf;
        pi->first_mdl = mdl;
        pi->curr_mdl = mdl;
      }
      //pi->mdl_count++;
      extra_info_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_extra_info);
      more_data_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_more_data);
      pi->total_length = pi->total_length + page_buf->rsp.status;
    }

    /* Packet done, add it to the list */
    if (!more_data_flag && !extra_info_flag) {
      pi->curr_pb = pi->first_pb;
      pi->curr_mdl = pi->first_mdl;
      XenNet_MakePackets(xi, &rc, pi);
    }

    page_buf = next_buf;
  }
  XN_ASSERT(!more_data_flag && !extra_info_flag);

#if NTDDI_VERSION < NTDDI_VISTA
  packet_count = 0;
  first_header_only_packet = NULL;
  last_header_only_packet = NULL;

  while (rc.first_packet) {
    PNDIS_PACKET packet;
    NDIS_STATUS status;

    packet = rc.first_packet;
    XN_ASSERT(PACKET_FIRST_PB(packet));
    rc.first_packet = PACKET_NEXT_PACKET(packet);
    status = NDIS_GET_PACKET_STATUS(packet);
    if (status == NDIS_STATUS_RESOURCES) {
      if (!first_header_only_packet) {
        first_header_only_packet = packet;
      } else {
        PACKET_NEXT_PACKET(last_header_only_packet) = packet;
      }
      last_header_only_packet = packet;
      PACKET_NEXT_PACKET(packet) = NULL;
    }
    packets[packet_count++] = packet;
    /* if we indicate a packet with NDIS_STATUS_RESOURCES then any following packet can't be NDIS_STATUS_SUCCESS */
    if (packet_count == MAXIMUM_PACKETS_PER_INDICATE || !rc.first_packet
        || (NDIS_GET_PACKET_STATUS(rc.first_packet) == NDIS_STATUS_SUCCESS
        && status == NDIS_STATUS_RESOURCES)) {
      NdisMIndicateReceivePacket(xi->adapter_handle, packets, packet_count);
      packet_count = 0;
    }
  }
  /* now return the packets for which we indicated NDIS_STATUS_RESOURCES */
  while (first_header_only_packet) {
    PNDIS_PACKET packet = first_header_only_packet;
    first_header_only_packet = PACKET_NEXT_PACKET(packet);
    XenNet_ReturnPacket(xi, packet);
  }
#else
  if (rc.first_nbl) {
    NdisMIndicateReceiveNetBufferLists(xi->adapter_handle, rc.first_nbl,
      NDIS_DEFAULT_PORT_NUMBER, rc.nbl_count,
      NDIS_RECEIVE_FLAGS_DISPATCH_LEVEL
      //| NDIS_RECEIVE_FLAGS_SINGLE_ETHER_TYPE
      | NDIS_RECEIVE_FLAGS_PERFECT_FILTERED);
  }
#endif
  //FUNCTION_EXIT();
  return dont_set_event;
}
static VOID
XenNet_BufferFree(xennet_info_t *xi)
{
  shared_buffer_t *sb;
  int i;

  for (i = 0; i < NET_RX_RING_SIZE; i++) {
    if (xi->rx_ring_pbs[i] != NULL) {
      put_pb_on_freelist(xi, xi->rx_ring_pbs[i]);
      xi->rx_ring_pbs[i] = NULL;
    }
  }

  /* because we are shutting down this won't allocate new ones */
  while ((sb = get_pb_from_freelist(xi)) != NULL) {
    XnEndAccess(xi->handle,
      sb->gref, FALSE, (ULONG)'XNRX');
    IoFreeMdl(sb->mdl);
    ExFreePoolWithTag(sb->virtual, XENNET_POOL_TAG);
    ExFreePoolWithTag(sb, XENNET_POOL_TAG);
  }
  while ((sb = get_hb_from_freelist(xi)) != NULL) {
    IoFreeMdl(sb->mdl);
    ExFreePoolWithTag(sb, XENNET_POOL_TAG);
  }
}
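/*
 XenNet_RxInit (note added for readability): one-time rx setup. Both freelist
 stacks are sized at NET_RX_RING_SIZE * 4 entries, and the packet pool
 differs by NDIS version: a classic NDIS_PACKET pool before Vista, and
 NET_BUFFER_LIST / NET_BUFFER pools (with DataSize = 0, since the data pages
 come from the ring) on NDIS 6.
*/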
BOOLEAN
XenNet_RxInit(xennet_info_t *xi) {
#if NTDDI_VERSION < NTDDI_VISTA
  NDIS_STATUS status;
#else
  NET_BUFFER_LIST_POOL_PARAMETERS nbl_pool_parameters;
  NET_BUFFER_POOL_PARAMETERS nb_pool_parameters;
#endif
  int ret;
  int i;

  FUNCTION_ENTER();

  // this stuff needs to be done once only...
  KeInitializeSpinLock(&xi->rx_lock);
  KeInitializeEvent(&xi->rx_idle_event, SynchronizationEvent, FALSE);
  xi->rxpi = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(packet_info_t) * NdisSystemProcessorCount(), XENNET_POOL_TAG, NormalPoolPriority);
  if (!xi->rxpi) {
    KdPrint(("ExAllocatePoolWithTagPriority failed\n"));
    return FALSE;
  }
  NdisZeroMemory(xi->rxpi, sizeof(packet_info_t) * NdisSystemProcessorCount());

  ret = stack_new(&xi->rx_pb_stack, NET_RX_RING_SIZE * 4);
  if (!ret) {
    FUNCTION_MSG("Failed to allocate rx_pb_stack\n");
    ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);
    return FALSE;
  }
  ret = stack_new(&xi->rx_hb_stack, NET_RX_RING_SIZE * 4);
  if (!ret) {
    FUNCTION_MSG("Failed to allocate rx_hb_stack\n");
    stack_delete(xi->rx_pb_stack, NULL, NULL);
    ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);
    return FALSE;
  }

  xi->rx_id_free = NET_RX_RING_SIZE;
  xi->rx_outstanding = 0;

  for (i = 0; i < NET_RX_RING_SIZE; i++) {
    xi->rx_ring_pbs[i] = NULL;
  }

#if NTDDI_VERSION < NTDDI_VISTA
  NdisAllocatePacketPool(&status, &xi->rx_packet_pool, NET_RX_RING_SIZE * 4, PROTOCOL_RESERVED_SIZE_IN_PACKET);
  if (status != NDIS_STATUS_SUCCESS) {
    KdPrint(("NdisAllocatePacketPool failed with 0x%x\n", status));
    return FALSE;
  }
#else
  nbl_pool_parameters.Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
  nbl_pool_parameters.Header.Revision = NET_BUFFER_LIST_POOL_PARAMETERS_REVISION_1;
  nbl_pool_parameters.Header.Size = NDIS_SIZEOF_NET_BUFFER_LIST_POOL_PARAMETERS_REVISION_1;
  nbl_pool_parameters.ProtocolId = NDIS_PROTOCOL_ID_DEFAULT;
  nbl_pool_parameters.fAllocateNetBuffer = FALSE;
  nbl_pool_parameters.ContextSize = 0;
  nbl_pool_parameters.PoolTag = XENNET_POOL_TAG;
  nbl_pool_parameters.DataSize = 0; /* NET_BUFFERS are always allocated separately */

  xi->rx_nbl_pool = NdisAllocateNetBufferListPool(xi->adapter_handle, &nbl_pool_parameters);
  if (!xi->rx_nbl_pool) {
    KdPrint(("NdisAllocateNetBufferListPool failed\n"));
    return FALSE;
  }

  nb_pool_parameters.Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
  nb_pool_parameters.Header.Revision = NET_BUFFER_POOL_PARAMETERS_REVISION_1;
  nb_pool_parameters.Header.Size = NDIS_SIZEOF_NET_BUFFER_POOL_PARAMETERS_REVISION_1;
  nb_pool_parameters.PoolTag = XENNET_POOL_TAG;
  nb_pool_parameters.DataSize = 0; /* the buffers come from the ring */
  xi->rx_packet_pool = NdisAllocateNetBufferPool(xi->adapter_handle, &nb_pool_parameters);
  if (!xi->rx_packet_pool) {
    KdPrint(("NdisAllocateNetBufferPool (rx_packet_pool) failed\n"));
    return FALSE;
  }
#endif
  XenNet_FillRing(xi);

  FUNCTION_EXIT();

  return TRUE;
}
VOID
XenNet_RxShutdown(xennet_info_t *xi) {
  KIRQL old_irql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->rx_lock, &old_irql);
  while (xi->rx_outstanding) {
    FUNCTION_MSG("Waiting for %d packets to be returned\n", xi->rx_outstanding);
    KeReleaseSpinLock(&xi->rx_lock, old_irql);
    KeWaitForSingleObject(&xi->rx_idle_event, Executive, KernelMode, FALSE, NULL);
    KeAcquireSpinLock(&xi->rx_lock, &old_irql);
  }
  KeReleaseSpinLock(&xi->rx_lock, old_irql);

  XenNet_BufferFree(xi);

  stack_delete(xi->rx_pb_stack, NULL, NULL);
  stack_delete(xi->rx_hb_stack, NULL, NULL);

  ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);

#if NTDDI_VERSION < NTDDI_VISTA
  NdisFreePacketPool(xi->rx_packet_pool);
#else
  NdisFreeNetBufferPool(xi->rx_packet_pool);
  NdisFreeNetBufferListPool(xi->rx_nbl_pool);
#endif

  FUNCTION_EXIT();
  return;
}