win-pvdrivers: xennet/xennet_rx.c @ 1068:ebfa9417f1ee

Remove packet stats debug from xennet_rx

author:   James Harper <james.harper@bendigoit.com.au>
date:     Tue Oct 29 19:55:25 2013 +1100
parents:  2ef536c2d9fe
children: 05ece536b204
/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "xennet.h"
static __inline shared_buffer_t *
get_pb_from_freelist(struct xennet_info *xi)
{
  shared_buffer_t *pb;
  PVOID ptr_ref;

  if (stack_pop(xi->rx_pb_stack, &ptr_ref))
  {
    pb = ptr_ref;
    pb->ref_count = 1;
    InterlockedDecrement(&xi->rx_pb_free);
    return pb;
  }

  /* don't allocate a new one if we are shutting down */
  if (xi->device_state != DEVICE_STATE_INITIALISING && xi->device_state != DEVICE_STATE_ACTIVE)
    return NULL;

  pb = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(shared_buffer_t), XENNET_POOL_TAG, LowPoolPriority);
  if (!pb)
    return NULL;
  pb->virtual = ExAllocatePoolWithTagPriority(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG, LowPoolPriority);
  if (!pb->virtual)
  {
    ExFreePoolWithTag(pb, XENNET_POOL_TAG);
    return NULL;
  }
  pb->mdl = IoAllocateMdl(pb->virtual, PAGE_SIZE, FALSE, FALSE, NULL);
  if (!pb->mdl)
  {
    ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
    ExFreePoolWithTag(pb, XENNET_POOL_TAG);
    return NULL;
  }
  pb->gref = (grant_ref_t)XnGrantAccess(xi->handle,
    (ULONG)(MmGetPhysicalAddress(pb->virtual).QuadPart >> PAGE_SHIFT), FALSE, INVALID_GRANT_REF, (ULONG)'XNRX');
  if (pb->gref == INVALID_GRANT_REF)
  {
    IoFreeMdl(pb->mdl);
    ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
    ExFreePoolWithTag(pb, XENNET_POOL_TAG);
    return NULL;
  }
  MmBuildMdlForNonPagedPool(pb->mdl);
  pb->ref_count = 1;
  return pb;
}

static __inline VOID
ref_pb(struct xennet_info *xi, shared_buffer_t *pb)
{
  UNREFERENCED_PARAMETER(xi);
  InterlockedIncrement(&pb->ref_count);
}

static __inline VOID
put_pb_on_freelist(struct xennet_info *xi, shared_buffer_t *pb)
{
  if (InterlockedDecrement(&pb->ref_count) == 0)
  {
    //NdisAdjustBufferLength(pb->buffer, PAGE_SIZE);
    //NDIS_BUFFER_LINKAGE(pb->buffer) = NULL;
    if (xi->rx_pb_free > RX_MAX_PB_FREELIST)
    {
      XnEndAccess(xi->handle, pb->gref, FALSE, (ULONG)'XNRX');
      IoFreeMdl(pb->mdl);
      ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
      ExFreePoolWithTag(pb, XENNET_POOL_TAG);
      return;
    }
    pb->mdl->ByteCount = PAGE_SIZE;
    pb->mdl->Next = NULL;
    pb->next = NULL;
    stack_push(xi->rx_pb_stack, pb);
    InterlockedIncrement(&xi->rx_pb_free);
  }
}
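
/* The three helpers above form a small reference-counted page pool:
   get_pb_from_freelist() pops a recycled buffer or allocates a fresh one,
   ref_pb() adds a reference when a buffer is shared between packets, and
   put_pb_on_freelist() recycles (or, once the freelist is over
   RX_MAX_PB_FREELIST entries, really frees) only when the last reference
   drops. Below is a minimal single-threaded user-space sketch of the same
   pop-or-allocate / last-reference-recycles pattern; malloc and a plain
   linked list stand in for the pool allocator and the driver's stack_*
   helpers, so this is illustrative only, not driver code. */

#include <stdlib.h>

#define MAX_FREELIST 16

typedef struct buf {
  struct buf *next;        /* freelist linkage */
  int ref_count;
  unsigned char data[4096];
} buf_t;

static buf_t *freelist;    /* the driver uses interlocked ops; this sketch is single-threaded */
static int freelist_len;

static buf_t *get_buf(void) {
  buf_t *b = freelist;
  if (b) {                       /* pop a recycled buffer first */
    freelist = b->next;
    freelist_len--;
  } else {
    b = malloc(sizeof(*b));      /* fall back to a fresh allocation */
    if (!b) return NULL;
  }
  b->ref_count = 1;
  return b;
}

static void ref_buf(buf_t *b) { b->ref_count++; }

static void put_buf(buf_t *b) {
  if (--b->ref_count) return;    /* still shared; do nothing */
  if (freelist_len >= MAX_FREELIST) {
    free(b);                     /* freelist full enough: really free */
    return;
  }
  b->next = freelist;            /* otherwise recycle for the next get_buf() */
  freelist = b;
  freelist_len++;
}

int main(void) {
  buf_t *a = get_buf();
  ref_buf(a);                    /* buffer now shared by two packets */
  put_buf(a);                    /* first owner done: buffer stays alive */
  put_buf(a);                    /* last reference: recycled onto the freelist */
  return 0;
}
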
static __inline shared_buffer_t *
get_hb_from_freelist(struct xennet_info *xi)
{
  shared_buffer_t *hb;
  PVOID ptr_ref;

  if (stack_pop(xi->rx_hb_stack, &ptr_ref))
  {
    hb = ptr_ref;
    InterlockedDecrement(&xi->rx_hb_free);
    return hb;
  }

  /* don't allocate a new one if we are shutting down */
  if (xi->device_state != DEVICE_STATE_INITIALISING && xi->device_state != DEVICE_STATE_ACTIVE)
    return NULL;

  hb = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(shared_buffer_t) + MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH, XENNET_POOL_TAG, LowPoolPriority);
  if (!hb)
    return NULL;
  NdisZeroMemory(hb, sizeof(shared_buffer_t));
  hb->mdl = IoAllocateMdl(hb + 1, MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH, FALSE, FALSE, NULL);
  if (!hb->mdl) {
    ExFreePoolWithTag(hb, XENNET_POOL_TAG);
    return NULL;
  }
  MmBuildMdlForNonPagedPool(hb->mdl);
  return hb;
}

static __inline VOID
put_hb_on_freelist(struct xennet_info *xi, shared_buffer_t *hb)
{
  XN_ASSERT(xi);

  hb->mdl->ByteCount = sizeof(shared_buffer_t) + MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH;
  hb->mdl->Next = NULL;
  hb->next = NULL;
  stack_push(xi->rx_hb_stack, hb);
  InterlockedIncrement(&xi->rx_hb_free);
}
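
/* Header buffers use a single allocation: the shared_buffer_t bookkeeping
   struct sits first and the header data area starts immediately after it,
   which is why the MDL is built over (hb + 1). The return path later
   recovers the owning struct from the data pointer with
   (shared_buffer_t *)MmGetMdlVirtualAddress(...) - 1. A tiny standalone
   sketch of that layout trick (hypothetical hdr_buf_t, illustration only): */

#include <stdio.h>
#include <stdlib.h>

typedef struct hdr_buf {
  struct hdr_buf *next;
  /* ... bookkeeping fields ... */
} hdr_buf_t;

#define DATA_SIZE 256

int main(void) {
  /* one allocation: struct immediately followed by its data area */
  hdr_buf_t *hb = malloc(sizeof(hdr_buf_t) + DATA_SIZE);
  unsigned char *data = (unsigned char *)(hb + 1);  /* data starts right after the struct */

  /* given only the data pointer, step back one struct to recover the owner */
  hdr_buf_t *owner = (hdr_buf_t *)data - 1;
  printf("recovered owner: %s\n", owner == hb ? "yes" : "no");
  free(hb);
  return 0;
}
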
// Called at DISPATCH_LEVEL with rx lock held
static VOID
XenNet_FillRing(struct xennet_info *xi)
{
  unsigned short id;
  shared_buffer_t *page_buf;
  ULONG i, notify;
  ULONG batch_target;
  RING_IDX req_prod = xi->rx_ring.req_prod_pvt;
  netif_rx_request_t *req;

  //FUNCTION_ENTER();

  if (xi->device_state != DEVICE_STATE_ACTIVE)
    return;

  batch_target = xi->rx_target - (req_prod - xi->rx_ring.rsp_cons);

  if (batch_target < (xi->rx_target >> 2)) {
    //FUNCTION_EXIT();
    return; /* only refill if we are less than 3/4 full already */
  }

  for (i = 0; i < batch_target; i++) {
    page_buf = get_pb_from_freelist(xi);
    if (!page_buf) {
      FUNCTION_MSG("Added %d out of %d buffers to rx ring (no free pages)\n", i, batch_target);
      break;
    }
    xi->rx_id_free--;

    /* Give to netback */
    id = (USHORT)((req_prod + i) & (NET_RX_RING_SIZE - 1));
    XN_ASSERT(xi->rx_ring_pbs[id] == NULL);
    xi->rx_ring_pbs[id] = page_buf;
    req = RING_GET_REQUEST(&xi->rx_ring, req_prod + i);
    req->id = id;
    req->gref = page_buf->gref;
    XN_ASSERT(req->gref != INVALID_GRANT_REF);
  }
  KeMemoryBarrier();
  xi->rx_ring.req_prod_pvt = req_prod + i;
  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->rx_ring, notify);
  if (notify) {
    XnNotify(xi->handle, xi->event_channel);
  }

  //FUNCTION_EXIT();

  return;
}
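
/* FillRing tops the shared ring back up to rx_target outstanding requests,
   but only once the deficit reaches a quarter of the target, so netback is
   not kicked for every handful of returned buffers. The indices are
   free-running and are masked into ring slots, which relies on the ring
   size being a power of two. A standalone sketch of just that index
   arithmetic (RING_SIZE and RX_TARGET are made-up values, not the driver's): */

#include <stdio.h>

#define RING_SIZE 256          /* must be a power of two for the mask to work */
#define RX_TARGET 128

static unsigned int req_prod, rsp_cons;   /* free-running, wrap naturally */

static void fill_ring(void) {
  unsigned int in_flight = req_prod - rsp_cons;  /* correct across wrap, unsigned */
  unsigned int batch = RX_TARGET - in_flight;
  unsigned int i;

  if (batch < (RX_TARGET >> 2))
    return;                    /* less than a quarter empty: not worth refilling yet */

  for (i = 0; i < batch; i++) {
    unsigned short id = (unsigned short)((req_prod + i) & (RING_SIZE - 1));
    /* slot 'id' would receive the grant reference of a fresh page buffer */
    (void)id;
  }
  req_prod += i;               /* the driver publishes this after a memory barrier */
}

int main(void) {
  fill_ring();
  printf("requests in flight after fill: %u\n", req_prod - rsp_cons);
  return 0;
}
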
#if NTDDI_VERSION < NTDDI_VISTA
typedef struct {
  PNDIS_PACKET first_packet;
  PNDIS_PACKET last_packet;
  ULONG packet_count;
} rx_context_t;
#else
typedef struct {
  PNET_BUFFER_LIST first_nbl;
  PNET_BUFFER_LIST last_nbl;
  ULONG packet_count;
  ULONG nbl_count;
} rx_context_t;
#endif
#if NTDDI_VERSION < NTDDI_VISTA
/*
 NDIS5 appears to insist that the checksum on received packets is correct, and won't
 believe us when we lie about it, which happens when the packet is generated on the
 same bridge in Dom0. Doh!
 This is only for TCP and UDP packets. IP checksums appear to be correct anyways.
*/
static BOOLEAN
XenNet_SumPacketData(
  packet_info_t *pi,
  PNDIS_PACKET packet,
  BOOLEAN set_csum) {
  USHORT i;
  PUCHAR buffer;
  PMDL mdl;
  UINT total_length;
  UINT data_length;
  UINT buffer_length;
  USHORT buffer_offset;
  ULONG csum;
  PUSHORT csum_ptr;
  USHORT remaining;
  USHORT ip4_length;
  BOOLEAN csum_span = TRUE; /* when the USHORT to be checksummed spans a buffer */

  NdisGetFirstBufferFromPacketSafe(packet, &mdl, &buffer, &buffer_length, &total_length, NormalPagePriority);
  if (!buffer) {
    FUNCTION_MSG("NdisGetFirstBufferFromPacketSafe failed, buffer == NULL\n");
    return FALSE;
  }
  XN_ASSERT(mdl);

  ip4_length = GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 2]);
  data_length = ip4_length + XN_HDR_SIZE;

  if ((USHORT)data_length > total_length) {
    FUNCTION_MSG("Size Mismatch %d (ip4_length + XN_HDR_SIZE) != %d (total_length)\n", ip4_length + XN_HDR_SIZE, total_length);
    return FALSE;
  }

  switch (pi->ip_proto) {
  case 6:
    XN_ASSERT(buffer_length >= (USHORT)(XN_HDR_SIZE + pi->ip4_header_length + 17));
    csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + pi->ip4_header_length + 16];
    break;
  case 17:
    XN_ASSERT(buffer_length >= (USHORT)(XN_HDR_SIZE + pi->ip4_header_length + 7));
    csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + pi->ip4_header_length + 6];
    break;
  default:
    FUNCTION_MSG("Don't know how to calc sum for IP Proto %d\n", pi->ip_proto);
    //FUNCTION_EXIT();
    return FALSE; // should never happen
  }

  if (set_csum)
    *csum_ptr = 0;

  csum = 0;
  csum += GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 12]) + GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 14]); // src
  csum += GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 16]) + GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 18]); // dst
  csum += ((USHORT)buffer[XN_HDR_SIZE + 9]);

  remaining = ip4_length - pi->ip4_header_length;

  csum += remaining;

  csum_span = FALSE;
  buffer_offset = i = XN_HDR_SIZE + pi->ip4_header_length;
  while (i < data_length) {
    /* don't include the checksum field itself in the calculation */
    if ((pi->ip_proto == 6 && i == XN_HDR_SIZE + pi->ip4_header_length + 16) || (pi->ip_proto == 17 && i == XN_HDR_SIZE + pi->ip4_header_length + 6)) {
      /* we know that this always happens in the header buffer so we are guaranteed the full two bytes */
      i += 2;
      buffer_offset += 2;
      continue;
    }
    if (csum_span) {
      /* the other half of the next bit */
      XN_ASSERT(buffer_offset == 0);
      csum += (USHORT)buffer[buffer_offset];
      csum_span = FALSE;
      i += 1;
      buffer_offset += 1;
    } else if (buffer_offset == buffer_length - 1) {
      /* deal with a buffer ending on an odd byte boundary */
      csum += (USHORT)buffer[buffer_offset] << 8;
      csum_span = TRUE;
      i += 1;
      buffer_offset += 1;
    } else {
      csum += GET_NET_PUSHORT(&buffer[buffer_offset]);
      i += 2;
      buffer_offset += 2;
    }
    if (buffer_offset == buffer_length && i < total_length) {
      NdisGetNextBuffer(mdl, &mdl);
      if (mdl == NULL) {
        FUNCTION_MSG(__DRIVER_NAME " Ran out of buffers\n");
        return FALSE; // should never happen
      }
      NdisQueryBufferSafe(mdl, &buffer, &buffer_length, NormalPagePriority);
      XN_ASSERT(buffer_length);
      buffer_offset = 0;
    }
  }

  while (csum & 0xFFFF0000)
    csum = (csum & 0xFFFF) + (csum >> 16);

  if (set_csum) {
    *csum_ptr = (USHORT)~GET_NET_USHORT((USHORT)csum);
  } else {
    return (BOOLEAN)(*csum_ptr == (USHORT)~GET_NET_USHORT((USHORT)csum));
  }
  return TRUE;
}
#endif
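
/* The loop above is the standard Internet ones-complement checksum (RFC
   1071) over the TCP/UDP pseudo-header and payload; most of its bulk is
   bookkeeping for 16-bit words that straddle NDIS buffer boundaries.
   Stripped of the buffer walking, the core algorithm over a single
   contiguous buffer reduces to this minimal sketch: */

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Ones-complement sum over a contiguous byte range, big-endian 16-bit words. */
static uint16_t inet_checksum(const uint8_t *data, size_t len) {
  uint32_t sum = 0;
  size_t i;

  for (i = 0; i + 1 < len; i += 2)
    sum += ((uint32_t)data[i] << 8) | data[i + 1];
  if (len & 1)
    sum += (uint32_t)data[len - 1] << 8;  /* odd trailing byte is padded with zero */

  while (sum & 0xFFFF0000)                /* fold carries back in, as the driver does */
    sum = (sum & 0xFFFF) + (sum >> 16);
  return (uint16_t)~sum;
}

int main(void) {
  const uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x1c };
  printf("checksum = 0x%04x\n", inet_checksum(pkt, sizeof(pkt)));
  return 0;
}
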
static BOOLEAN
XenNet_MakePacket(struct xennet_info *xi, rx_context_t *rc, packet_info_t *pi) {
#if NTDDI_VERSION < NTDDI_VISTA
  NDIS_STATUS status;
  PNDIS_PACKET packet;
#else
  PNET_BUFFER_LIST nbl;
  PNET_BUFFER packet;
#endif
  PMDL mdl_head, mdl_tail, curr_mdl;
  PUCHAR header_va;
  ULONG out_remaining;
  ULONG header_extra;
  shared_buffer_t *header_buf;
  ULONG outstanding;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  //UINT packet_length;
#else
  NDIS_TCP_IP_CHECKSUM_NET_BUFFER_LIST_INFO csum_info;
#endif
  //FUNCTION_ENTER();

#if NTDDI_VERSION < NTDDI_VISTA
  NdisAllocatePacket(&status, &packet, xi->rx_packet_pool);
  if (status != NDIS_STATUS_SUCCESS) {
    FUNCTION_MSG("No free packets\n");
    return FALSE;
  }

  NdisZeroMemory(packet->MiniportReservedEx, sizeof(packet->MiniportReservedEx));
  NDIS_SET_PACKET_HEADER_SIZE(packet, XN_HDR_SIZE);
#else
  nbl = NdisAllocateNetBufferList(xi->rx_nbl_pool, 0, 0);
  if (!nbl) {
    /* buffers will be freed in MakePackets */
    FUNCTION_MSG("No free nbls\n");
    //FUNCTION_EXIT();
    return FALSE;
  }

  packet = NdisAllocateNetBuffer(xi->rx_packet_pool, NULL, 0, 0);
  if (!packet) {
    FUNCTION_MSG("No free packets\n");
    NdisFreeNetBufferList(nbl);
    //FUNCTION_EXIT();
    return FALSE;
  }
#endif

  if (!pi->first_mdl->Next && !pi->split_required) {
    /* a single buffer <= MTU */
    header_buf = NULL;
    XenNet_BuildHeader(pi, pi->first_mdl_virtual, pi->first_mdl_length);
#if NTDDI_VERSION < NTDDI_VISTA
    NdisChainBufferAtBack(packet, pi->first_mdl);
    PACKET_FIRST_PB(packet) = pi->first_pb;
#else
    NET_BUFFER_FIRST_MDL(packet) = pi->first_mdl;
    NET_BUFFER_CURRENT_MDL(packet) = pi->first_mdl;
    NET_BUFFER_CURRENT_MDL_OFFSET(packet) = 0;
    NET_BUFFER_DATA_OFFSET(packet) = 0;
    NET_BUFFER_DATA_LENGTH(packet) = pi->total_length;
    NB_FIRST_PB(packet) = pi->first_pb;
#endif
    ref_pb(xi, pi->first_pb);
  } else {
    XN_ASSERT(ndis_os_minor_version >= 1);
    header_buf = get_hb_from_freelist(xi);
    if (!header_buf) {
      FUNCTION_MSG("No free header buffers\n");
#if NTDDI_VERSION < NTDDI_VISTA
      NdisUnchainBufferAtFront(packet, &curr_mdl);
      NdisFreePacket(packet);
#else
      NdisFreeNetBufferList(nbl);
      NdisFreeNetBuffer(packet);
#endif
      return FALSE;
    }
    header_va = (PUCHAR)(header_buf + 1);
    NdisMoveMemory(header_va, pi->header, pi->header_length);
    //if (pi->ip_proto == 50) {
    //  FUNCTION_MSG("header_length = %d, current_lookahead = %d\n", pi->header_length, xi->current_lookahead);
    //  FUNCTION_MSG("ip4_header_length = %d\n", pi->ip4_header_length);
    //  FUNCTION_MSG("tcp_header_length = %d\n", pi->tcp_header_length);
    //}
    /* make sure only the header is in the first buffer (or the entire packet, but that is done in the above case) */
    XenNet_BuildHeader(pi, header_va, MAX_ETH_HEADER_LENGTH + pi->ip4_header_length + pi->tcp_header_length);
    header_extra = pi->header_length - (MAX_ETH_HEADER_LENGTH + pi->ip4_header_length + pi->tcp_header_length);
    XN_ASSERT(pi->header_length <= MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH);
    header_buf->mdl->ByteCount = pi->header_length;
    mdl_head = mdl_tail = curr_mdl = header_buf->mdl;
#if NTDDI_VERSION < NTDDI_VISTA
    PACKET_FIRST_PB(packet) = header_buf;
    header_buf->next = pi->curr_pb;
    NdisChainBufferAtBack(packet, mdl_head);
#else
    NB_FIRST_PB(packet) = header_buf;
    header_buf->next = pi->curr_pb;
    NET_BUFFER_FIRST_MDL(packet) = mdl_head;
    NET_BUFFER_CURRENT_MDL(packet) = mdl_head;
    NET_BUFFER_CURRENT_MDL_OFFSET(packet) = 0;
    NET_BUFFER_DATA_OFFSET(packet) = 0;
    NET_BUFFER_DATA_LENGTH(packet) = pi->header_length;
#endif

    if (pi->split_required) {
      /* must be ip4 */
      ULONG tcp_length;
      USHORT new_ip4_length;
      tcp_length = (USHORT)min(pi->mss, pi->tcp_remaining);
      new_ip4_length = (USHORT)(pi->ip4_header_length + pi->tcp_header_length + tcp_length);
      SET_NET_USHORT(&header_va[XN_HDR_SIZE + 2], new_ip4_length);
      SET_NET_ULONG(&header_va[XN_HDR_SIZE + pi->ip4_header_length + 4], pi->tcp_seq);
      pi->tcp_seq += tcp_length;
      pi->tcp_remaining = (USHORT)(pi->tcp_remaining - tcp_length);
      /* part of the packet is already present in the header buffer for lookahead */
      out_remaining = tcp_length - header_extra;
      XN_ASSERT((LONG)out_remaining >= 0);
    } else {
      out_remaining = pi->total_length - pi->header_length;
      XN_ASSERT((LONG)out_remaining >= 0);
    }

    while (out_remaining != 0) {
      //ULONG in_buffer_offset;
      ULONG in_buffer_length;
      ULONG out_length;

      //if (pi->ip_proto == 50) {
      //  FUNCTION_MSG("in loop - out_remaining = %d, curr_buffer = %p, curr_pb = %p\n", out_remaining, pi->curr_mdl, pi->curr_pb);
      //}
      if (!pi->curr_mdl || !pi->curr_pb) {
        FUNCTION_MSG("out of buffers for packet\n");
        //KdPrint((__DRIVER_NAME " out_remaining = %d, curr_buffer = %p, curr_pb = %p\n", out_remaining, pi->curr_mdl, pi->curr_pb));
        // TODO: free some stuff or we'll leak
        /* unchain buffers then free packet */
        //FUNCTION_EXIT();
        return FALSE;
      }

      in_buffer_length = MmGetMdlByteCount(pi->curr_mdl);
      out_length = min(out_remaining, in_buffer_length - pi->curr_mdl_offset);
      curr_mdl = IoAllocateMdl((PUCHAR)MmGetMdlVirtualAddress(pi->curr_mdl) + pi->curr_mdl_offset, out_length, FALSE, FALSE, NULL);
      XN_ASSERT(curr_mdl);
      IoBuildPartialMdl(pi->curr_mdl, curr_mdl, (PUCHAR)MmGetMdlVirtualAddress(pi->curr_mdl) + pi->curr_mdl_offset, out_length);
      mdl_tail->Next = curr_mdl;
      mdl_tail = curr_mdl;
      curr_mdl->Next = NULL; /* I think this might be redundant */
#if NTDDI_VERSION < NTDDI_VISTA
#else
      NET_BUFFER_DATA_LENGTH(packet) += out_length;
#endif
      ref_pb(xi, pi->curr_pb);
      pi->curr_mdl_offset = (USHORT)(pi->curr_mdl_offset + out_length);
      if (pi->curr_mdl_offset == in_buffer_length) {
        pi->curr_mdl = pi->curr_mdl->Next;
        pi->curr_pb = pi->curr_pb->next;
        pi->curr_mdl_offset = 0;
      }
      out_remaining -= out_length;
    }
#if NTDDI_VERSION < NTDDI_VISTA
    if (pi->split_required) {
      // TODO: only if IP checksum is disabled...
      XenNet_SumIpHeader(header_va, pi->ip4_header_length);
    }
#endif
    if (header_extra > 0)
      pi->header_length -= header_extra;
  }

  rc->packet_count++;
#if NTDDI_VERSION < NTDDI_VISTA
#else
  NET_BUFFER_LIST_FIRST_NB(nbl) = packet;
#endif

  if (pi->parse_result == PARSE_OK) {
#if NTDDI_VERSION < NTDDI_VISTA
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    XN_ASSERT(csum_info->Value == 0);
    if (pi->csum_blank || pi->data_validated || pi->split_required) {
      BOOLEAN checksum_offload = FALSE;
      /* we know this is IPv4, and we know Linux always validates the IPv4 checksum for us */
      if (xi->setting_csum.V4Receive.IpChecksum) {
        if (!pi->ip_has_options || xi->setting_csum.V4Receive.IpOptionsSupported) {
          if (XenNet_CheckIpHeaderSum(pi->header, pi->ip4_header_length))
            csum_info->Receive.NdisPacketIpChecksumSucceeded = TRUE;
          else
            csum_info->Receive.NdisPacketIpChecksumFailed = TRUE;
        }
      }
      if (xi->setting_csum.V4Receive.TcpChecksum && pi->ip_proto == 6) {
        if (!pi->tcp_has_options || xi->setting_csum.V4Receive.TcpOptionsSupported) {
          csum_info->Receive.NdisPacketTcpChecksumSucceeded = TRUE;
          checksum_offload = TRUE;
        }
      } else if (xi->setting_csum.V4Receive.UdpChecksum && pi->ip_proto == 17) {
        csum_info->Receive.NdisPacketUdpChecksumSucceeded = TRUE;
        checksum_offload = TRUE;
      }
      if (pi->csum_blank && (!xi->config_csum_rx_dont_fix || !checksum_offload)) {
        XenNet_SumPacketData(pi, packet, TRUE);
      }
    } else if (xi->config_csum_rx_check && pi->ip_version == 4) {
      if (xi->setting_csum.V4Receive.IpChecksum) {
        if (!pi->ip_has_options || xi->setting_csum.V4Receive.IpOptionsSupported) {
          if (XenNet_CheckIpHeaderSum(pi->header, pi->ip4_header_length))
            csum_info->Receive.NdisPacketIpChecksumSucceeded = TRUE;
          else
            csum_info->Receive.NdisPacketIpChecksumFailed = TRUE;
        }
      }
      if (xi->setting_csum.V4Receive.TcpChecksum && pi->ip_proto == 6) {
        if (!pi->tcp_has_options || xi->setting_csum.V4Receive.TcpOptionsSupported) {
          if (XenNet_SumPacketData(pi, packet, FALSE)) {
            csum_info->Receive.NdisPacketTcpChecksumSucceeded = TRUE;
          } else {
            csum_info->Receive.NdisPacketTcpChecksumFailed = TRUE;
          }
        }
      } else if (xi->setting_csum.V4Receive.UdpChecksum && pi->ip_proto == 17) {
        if (XenNet_SumPacketData(pi, packet, FALSE)) {
          csum_info->Receive.NdisPacketUdpChecksumSucceeded = TRUE;
        } else {
          csum_info->Receive.NdisPacketUdpChecksumFailed = TRUE;
        }
      }
    }
#else
    csum_info.Value = 0;
    if (pi->csum_blank || pi->data_validated || pi->mss) {
      if (pi->ip_proto == 6) {
        csum_info.Receive.IpChecksumSucceeded = TRUE;
        csum_info.Receive.TcpChecksumSucceeded = TRUE;
      } else if (pi->ip_proto == 17) {
        csum_info.Receive.IpChecksumSucceeded = TRUE;
        csum_info.Receive.UdpChecksumSucceeded = TRUE;
      }
    }
    NET_BUFFER_LIST_INFO(nbl, TcpIpChecksumNetBufferListInfo) = csum_info.Value;
#endif
  }

#if NTDDI_VERSION < NTDDI_VISTA
  if (!rc->first_packet) {
    rc->first_packet = packet;
  } else {
    PACKET_NEXT_PACKET(rc->last_packet) = packet;
  }
  rc->last_packet = packet;
  rc->packet_count++;
#else
  if (!rc->first_nbl) {
    rc->first_nbl = nbl;
  } else {
    NET_BUFFER_LIST_NEXT_NBL(rc->last_nbl) = nbl;
  }
  rc->last_nbl = nbl;
  NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL;
  rc->nbl_count++;
  if (pi->is_multicast) {
    /* multicast */
    xi->stats.ifHCInMulticastPkts++;
    xi->stats.ifHCInMulticastOctets += NET_BUFFER_DATA_LENGTH(packet);
  } else if (pi->is_broadcast) {
    /* broadcast */
    xi->stats.ifHCInBroadcastPkts++;
    xi->stats.ifHCInBroadcastOctets += NET_BUFFER_DATA_LENGTH(packet);
  } else {
    /* unicast */
    xi->stats.ifHCInUcastPkts++;
    xi->stats.ifHCInUcastOctets += NET_BUFFER_DATA_LENGTH(packet);
  }
#endif

  outstanding = InterlockedIncrement(&xi->rx_outstanding);
#if NTDDI_VERSION < NTDDI_VISTA
  if (outstanding > RX_PACKET_HIGH_WATER_MARK || !xi->rx_pb_free) {
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_RESOURCES);
  } else {
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
  }
#if 0
  /* windows gets lazy about ack packets and holds on to them forever under high load situations. we don't like this */
  NdisQueryPacketLength(packet, &packet_length);
  if (pi->parse_result != PARSE_OK || (pi->ip_proto == 6 && packet_length <= NDIS_STATUS_RESOURCES_MAX_LENGTH))
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_RESOURCES);
  else
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
#endif
#endif

  //FUNCTION_EXIT();
  return TRUE;
}
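
/* When netback hands up one large coalesced TCP packet (split_required),
   XenNet_MakePacket re-slices it into MSS-sized segments: each output
   packet gets a copy of the headers with the IPv4 total-length field
   rewritten and the TCP sequence number advanced by the bytes already
   consumed. A standalone sketch of just that header arithmetic, assuming
   plain 20-byte IPv4 and TCP headers (values are illustrative): */

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void) {
  uint16_t ip4_header_length = 20, tcp_header_length = 20;
  uint16_t mss = 1460;
  uint32_t tcp_seq = 1000;       /* sequence number of the first payload byte */
  uint16_t tcp_remaining = 4000; /* payload bytes left to emit */

  while (tcp_remaining) {
    uint16_t tcp_length = MIN(mss, tcp_remaining);
    uint16_t new_ip4_length = ip4_header_length + tcp_header_length + tcp_length;

    printf("segment: seq=%u ip4.total_length=%u payload=%u\n",
           tcp_seq, new_ip4_length, tcp_length);

    tcp_seq += tcp_length;       /* the next segment continues where this one ended */
    tcp_remaining -= tcp_length;
  }
  return 0;
}
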
static VOID
XenNet_MakePackets(struct xennet_info *xi, rx_context_t *rc, packet_info_t *pi)
{
  UCHAR psh;
  shared_buffer_t *page_buf;

  XenNet_ParsePacketHeader(pi, NULL, XN_HDR_SIZE + xi->current_lookahead);

  if (!XenNet_FilterAcceptPacket(xi, pi)) {
    goto done;
  }

  if (pi->split_required) {
#if NTDDI_VERSION < NTDDI_VISTA
    /* need to split to mss for NDIS5 */
#else
    switch (xi->current_gso_rx_split_type) {
    case RX_LSO_SPLIT_HALF:
      pi->mss = max((pi->tcp_length + 1) / 2, pi->mss);
      break;
    case RX_LSO_SPLIT_NONE:
      pi->mss = 65535;
      break;
    }
#endif
  }

  switch (pi->ip_proto) {
  case 6:  // TCP
    if (pi->split_required)
      break;
    /* fall through */
  case 17:  // UDP
    if (!XenNet_MakePacket(xi, rc, pi)) {
      FUNCTION_MSG("Failed to make packet\n");
#if NTDDI_VERSION < NTDDI_VISTA
      xi->stat_rx_no_buffer++;
#else
      xi->stats.ifInDiscards++;
#endif
      goto done;
    }
    goto done;
  default:
    if (!XenNet_MakePacket(xi, rc, pi)) {
      FUNCTION_MSG("Failed to make packet\n");
#if NTDDI_VERSION < NTDDI_VISTA
      xi->stat_rx_no_buffer++;
#else
      xi->stats.ifInDiscards++;
#endif
      goto done;
    }
    goto done;
  }

  /* this is the split_required code */
  pi->tcp_remaining = pi->tcp_length;

  /* we can make certain assumptions here as the following code is only for tcp4 */
  psh = pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] & 8;
  while (pi->tcp_remaining) {
    if (!XenNet_MakePacket(xi, rc, pi)) {
      FUNCTION_MSG("Failed to make packet\n");
#if NTDDI_VERSION < NTDDI_VISTA
      xi->stat_rx_no_buffer++;
#else
      xi->stats.ifInDiscards++;
#endif
      break; /* we are out of memory - just drop the packets */
    }
    if (psh) {
      if (pi->tcp_remaining)
        pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] &= ~8;
      else
        pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] |= 8;
    }
  }
done:
  page_buf = pi->first_pb;
  while (page_buf) {
    shared_buffer_t *next_pb = page_buf->next;
    put_pb_on_freelist(xi, page_buf); /* this doesn't actually free the page_buf if there are outstanding references */
    page_buf = next_pb;
  }
  XenNet_ClearPacketInfo(pi);
  //FUNCTION_EXIT();
  return;
}
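
/* Because the split loop reuses one header template for every segment,
   the TCP PSH flag (bit 3 of the flags byte at offset 13 in the TCP
   header) is remembered up front, cleared in the template while more
   segments remain, and restored when the run ends. A tiny illustration
   of that bit manipulation (flag values are illustrative): */

#include <stdio.h>

#define TCP_FLAG_PSH 0x08  /* bit 3 of the TCP flags byte */

int main(void) {
  unsigned char flags = 0x18;            /* PSH|ACK, as received */
  unsigned char saved_psh = flags & TCP_FLAG_PSH;

  flags &= (unsigned char)~TCP_FLAG_PSH; /* intermediate segments: PSH off */
  printf("intermediate flags: 0x%02x\n", flags);

  if (saved_psh)
    flags |= TCP_FLAG_PSH;               /* final segment: PSH back on */
  printf("final flags: 0x%02x\n", flags);
  return 0;
}
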
#if NTDDI_VERSION < NTDDI_VISTA
/* called at DISPATCH_LEVEL */
/* it's okay for return packet to be called while resume_state != RUNNING as the packet will simply be added back to the freelist, the grants will be fixed later */
VOID
XenNet_ReturnPacket(NDIS_HANDLE adapter_context, PNDIS_PACKET packet) {
  struct xennet_info *xi = adapter_context;
  PNDIS_BUFFER buffer;
  shared_buffer_t *page_buf = PACKET_FIRST_PB(packet);

  //FUNCTION_ENTER();

  NdisUnchainBufferAtFront(packet, &buffer);

  while (buffer) {
    shared_buffer_t *next_buf;
    XN_ASSERT(page_buf);
    next_buf = page_buf->next;
    if (!page_buf->virtual) {
      /* this is a hb not a pb because virtual is NULL (virtual is just the memory after the hb) */
      put_hb_on_freelist(xi, (shared_buffer_t *)MmGetMdlVirtualAddress(buffer) - 1);
    } else {
      if (buffer != page_buf->mdl)
        NdisFreeBuffer(buffer);
      put_pb_on_freelist(xi, page_buf);
    }
    NdisUnchainBufferAtFront(packet, &buffer);
    page_buf = next_buf;
  }

  NdisFreePacket(packet);
  InterlockedDecrement(&xi->rx_outstanding);
  if (!xi->rx_outstanding && xi->device_state != DEVICE_STATE_ACTIVE)
    KeSetEvent(&xi->rx_idle_event, IO_NO_INCREMENT, FALSE);
  //FUNCTION_EXIT();
}
#else
/* called at <= DISPATCH_LEVEL */
/* it's okay for return packet to be called while resume_state != RUNNING as the packet will simply be added back to the freelist, the grants will be fixed later */
VOID
XenNet_ReturnNetBufferLists(NDIS_HANDLE adapter_context, PNET_BUFFER_LIST curr_nbl, ULONG return_flags)
{
  struct xennet_info *xi = adapter_context;
  UNREFERENCED_PARAMETER(return_flags);

  //FUNCTION_ENTER();

  //KdPrint((__DRIVER_NAME " page_buf = %p\n", page_buf));

  XN_ASSERT(xi);
  while (curr_nbl)
  {
    PNET_BUFFER_LIST next_nbl;
    PNET_BUFFER curr_nb;

    next_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl);
    curr_nb = NET_BUFFER_LIST_FIRST_NB(curr_nbl);
    while (curr_nb)
    {
      PNET_BUFFER next_nb;
      PMDL curr_mdl;
      shared_buffer_t *page_buf;

      next_nb = NET_BUFFER_NEXT_NB(curr_nb);
      curr_mdl = NET_BUFFER_FIRST_MDL(curr_nb);
      page_buf = NB_FIRST_PB(curr_nb);
      while (curr_mdl)
      {
        shared_buffer_t *next_buf;
        PMDL next_mdl;

        XN_ASSERT(page_buf); /* make sure that there is a pb to match this mdl */
        next_mdl = curr_mdl->Next;
        next_buf = page_buf->next;
        if (!page_buf->virtual)
        {
          /* this is a hb not a pb because virtual is NULL (virtual is just the memory after the hb) */
          put_hb_on_freelist(xi, (shared_buffer_t *)MmGetMdlVirtualAddress(curr_mdl) - 1);
        }
        else
        {
          //KdPrint((__DRIVER_NAME " returning page_buf %p with id %d\n", page_buf, page_buf->id));
          if (curr_mdl != page_buf->mdl)
          {
            //KdPrint((__DRIVER_NAME " curr_mdl = %p, page_buf->mdl = %p\n", curr_mdl, page_buf->mdl));
            IoFreeMdl(curr_mdl);
          }
          put_pb_on_freelist(xi, page_buf);
        }
        curr_mdl = next_mdl;
        page_buf = next_buf;
      }

      NdisFreeNetBuffer(curr_nb);
      InterlockedDecrement(&xi->rx_outstanding);

      curr_nb = next_nb;
    }
    NdisFreeNetBufferList(curr_nbl);
    curr_nbl = next_nbl;
  }

  if (!xi->rx_outstanding && xi->device_state != DEVICE_STATE_ACTIVE)
    KeSetEvent(&xi->rx_idle_event, IO_NO_INCREMENT, FALSE);

  //FUNCTION_EXIT();
}
#endif
/* We limit the number of packets per interrupt so that acks get a chance
   under high rx load. The DPC is immediately re-scheduled */

#define MAXIMUM_PACKETS_PER_INDICATE 32

#define MAXIMUM_PACKETS_PER_INTERRUPT 2560 /* this is calculated before large packet split */
#define MAXIMUM_DATA_PER_INTERRUPT (MAXIMUM_PACKETS_PER_INTERRUPT * 1500) /* help account for large packets */
// Called at DISPATCH_LEVEL
BOOLEAN
XenNet_RxBufferCheck(struct xennet_info *xi)
{
  RING_IDX cons, prod;
  ULONG packet_count = 0;
  ULONG packet_data = 0;
  ULONG buffer_count = 0;
  USHORT id;
  int more_to_do = FALSE;
  shared_buffer_t *page_buf;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packets[MAXIMUM_PACKETS_PER_INDICATE];
  PNDIS_PACKET first_header_only_packet;
  PNDIS_PACKET last_header_only_packet;
#else
#endif
  //ULONG nbl_count = 0;
  ULONG interim_packet_data = 0;
  struct netif_extra_info *ei;
  rx_context_t rc;
  packet_info_t *pi = &xi->rxpi[KeGetCurrentProcessorNumber() & 0xff];
  shared_buffer_t *head_buf = NULL;
  shared_buffer_t *tail_buf = NULL;
  shared_buffer_t *last_buf = NULL;
  BOOLEAN extra_info_flag = FALSE;
  BOOLEAN more_data_flag = FALSE;
  BOOLEAN dont_set_event;
  //FUNCTION_ENTER();

#if NTDDI_VERSION < NTDDI_VISTA
  rc.first_packet = NULL;
  rc.last_packet = NULL;
  rc.packet_count = 0;
#else
  rc.first_nbl = NULL;
  rc.last_nbl = NULL;
  rc.packet_count = 0;
  rc.nbl_count = 0;
#endif

  /* get all the buffers off the ring as quickly as possible so the lock is held for a minimum amount of time */
  KeAcquireSpinLockAtDpcLevel(&xi->rx_lock);

  if (xi->device_state != DEVICE_STATE_ACTIVE) {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);
    return FALSE;
  }

  if (xi->rx_partial_buf) {
    head_buf = xi->rx_partial_buf;
    tail_buf = xi->rx_partial_buf;
    while (tail_buf->next)
      tail_buf = tail_buf->next;
    more_data_flag = xi->rx_partial_more_data_flag;
    extra_info_flag = xi->rx_partial_extra_info_flag;
    xi->rx_partial_buf = NULL;
  }

  do {
    prod = xi->rx_ring.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'prod'. */

    for (cons = xi->rx_ring.rsp_cons; cons != prod && packet_count < MAXIMUM_PACKETS_PER_INTERRUPT && packet_data < MAXIMUM_DATA_PER_INTERRUPT; cons++) {
      id = (USHORT)(cons & (NET_RX_RING_SIZE - 1));
      page_buf = xi->rx_ring_pbs[id];
      XN_ASSERT(page_buf);
      xi->rx_ring_pbs[id] = NULL;
      xi->rx_id_free++;
      memcpy(&page_buf->rsp, RING_GET_RESPONSE(&xi->rx_ring, cons), max(sizeof(struct netif_rx_response), sizeof(struct netif_extra_info)));
      if (!extra_info_flag) {
        if (page_buf->rsp.status <= 0 || page_buf->rsp.offset + page_buf->rsp.status > PAGE_SIZE) {
          FUNCTION_MSG("Error: rsp offset %d, size %d\n",
            page_buf->rsp.offset, page_buf->rsp.status);
          XN_ASSERT(!extra_info_flag);
          put_pb_on_freelist(xi, page_buf);
          continue;
        }
      }

      if (!head_buf) {
        head_buf = page_buf;
        tail_buf = page_buf;
      } else {
        tail_buf->next = page_buf;
        tail_buf = page_buf;
      }
      page_buf->next = NULL;

      if (extra_info_flag) {
        ei = (struct netif_extra_info *)&page_buf->rsp;
        extra_info_flag = ei->flags & XEN_NETIF_EXTRA_FLAG_MORE;
      } else {
        more_data_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_more_data);
        extra_info_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_extra_info);
        interim_packet_data += page_buf->rsp.status;
      }

      if (!extra_info_flag && !more_data_flag) {
        last_buf = page_buf;
        packet_count++;
        packet_data += interim_packet_data;
        interim_packet_data = 0;
      }
      buffer_count++;
    }
    xi->rx_ring.rsp_cons = cons;

    /* Give netback more buffers */
    XenNet_FillRing(xi);

    if (packet_count >= MAXIMUM_PACKETS_PER_INTERRUPT || packet_data >= MAXIMUM_DATA_PER_INTERRUPT)
      break;

    more_to_do = RING_HAS_UNCONSUMED_RESPONSES(&xi->rx_ring);
    if (!more_to_do) {
      xi->rx_ring.sring->rsp_event = xi->rx_ring.rsp_cons + 1;
      KeMemoryBarrier();
      more_to_do = RING_HAS_UNCONSUMED_RESPONSES(&xi->rx_ring);
    }
  } while (more_to_do);

  /* anything past last_buf belongs to an incomplete packet... */
  if (last_buf && last_buf->next)
  {
    FUNCTION_MSG("Partial receive\n");
    xi->rx_partial_buf = last_buf->next;
    xi->rx_partial_more_data_flag = more_data_flag;
    xi->rx_partial_extra_info_flag = extra_info_flag;
    last_buf->next = NULL;
  }

  KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);

  if (packet_count >= MAXIMUM_PACKETS_PER_INTERRUPT || packet_data >= MAXIMUM_DATA_PER_INTERRUPT)
  {
    /* fire again immediately */
    FUNCTION_MSG("Dpc Duration Exceeded\n");
    /* we want the Dpc on the end of the queue. By definition we are already on the right CPU so we know the Dpc queue will be run immediately */
    //KeSetImportanceDpc(&xi->rxtx_dpc, MediumImportance);
    KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
    /* don't set an event in TX path */
    dont_set_event = TRUE;
  }
  else
  {
    /* make sure the Dpc queue is run immediately next interrupt */
    //KeSetImportanceDpc(&xi->rxtx_dpc, HighImportance);
    /* set an event in TX path */
    dont_set_event = FALSE;
  }

  /* make packets out of the buffers */
  page_buf = head_buf;
  extra_info_flag = FALSE;
  more_data_flag = FALSE;

  while (page_buf) {
    shared_buffer_t *next_buf = page_buf->next;
    PMDL mdl;

    page_buf->next = NULL;
    if (extra_info_flag) {
      //KdPrint((__DRIVER_NAME " processing extra info\n"));
      ei = (struct netif_extra_info *)&page_buf->rsp;
      extra_info_flag = ei->flags & XEN_NETIF_EXTRA_FLAG_MORE;
      switch (ei->type)
      {
      case XEN_NETIF_EXTRA_TYPE_GSO:
        switch (ei->u.gso.type) {
        case XEN_NETIF_GSO_TYPE_TCPV4:
          pi->mss = ei->u.gso.size;
          // TODO - put this assertion somewhere XN_ASSERT(header_len + pi->mss <= PAGE_SIZE); // this limits MTU to PAGE_SIZE - XN_HEADER_LEN
          break;
        default:
          FUNCTION_MSG("Unknown GSO type (%d) detected\n", ei->u.gso.type);
          break;
        }
        break;
      default:
        FUNCTION_MSG("Unknown extra info type (%d) detected\n", ei->type);
        break;
      }
      put_pb_on_freelist(xi, page_buf);
    } else {
      XN_ASSERT(!page_buf->rsp.offset);
      if (!more_data_flag) { // handling the packet's 1st buffer
        if (page_buf->rsp.flags & NETRXF_csum_blank)
          pi->csum_blank = TRUE;
        if (page_buf->rsp.flags & NETRXF_data_validated)
          pi->data_validated = TRUE;
      }

      mdl = page_buf->mdl;
      mdl->ByteCount = page_buf->rsp.status; //NdisAdjustBufferLength(mdl, page_buf->rsp.status);
      //KdPrint((__DRIVER_NAME " buffer = %p, pb = %p\n", buffer, page_buf));
      if (pi->first_pb) {
        XN_ASSERT(pi->curr_pb);
        //KdPrint((__DRIVER_NAME " additional buffer\n"));
        pi->curr_pb->next = page_buf;
        pi->curr_pb = page_buf;
        XN_ASSERT(pi->curr_mdl);
        pi->curr_mdl->Next = mdl;
        pi->curr_mdl = mdl;
      } else {
        pi->first_pb = page_buf;
        pi->curr_pb = page_buf;
        pi->first_mdl = mdl;
        pi->curr_mdl = mdl;
      }
      //pi->mdl_count++;
      extra_info_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_extra_info);
      more_data_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_more_data);
      pi->total_length = pi->total_length + page_buf->rsp.status;
    }

    /* Packet done, add it to the list */
    if (!more_data_flag && !extra_info_flag) {
      pi->curr_pb = pi->first_pb;
      pi->curr_mdl = pi->first_mdl;
      XenNet_MakePackets(xi, &rc, pi);
    }

    page_buf = next_buf;
  }
  XN_ASSERT(!more_data_flag && !extra_info_flag);

#if NTDDI_VERSION < NTDDI_VISTA
  packet_count = 0;
  first_header_only_packet = NULL;
  last_header_only_packet = NULL;

  while (rc.first_packet) {
    PNDIS_PACKET packet;
    NDIS_STATUS status;

    packet = rc.first_packet;
    XN_ASSERT(PACKET_FIRST_PB(packet));
    rc.first_packet = PACKET_NEXT_PACKET(packet);
    status = NDIS_GET_PACKET_STATUS(packet);
    if (status == NDIS_STATUS_RESOURCES) {
      if (!first_header_only_packet) {
        first_header_only_packet = packet;
      } else {
        PACKET_NEXT_PACKET(last_header_only_packet) = packet;
      }
      last_header_only_packet = packet;
      PACKET_NEXT_PACKET(packet) = NULL;
    }
    packets[packet_count++] = packet;
    /* if we indicate a packet with NDIS_STATUS_RESOURCES then any following packet can't be NDIS_STATUS_SUCCESS */
    if (packet_count == MAXIMUM_PACKETS_PER_INDICATE || !rc.first_packet
        || (NDIS_GET_PACKET_STATUS(rc.first_packet) == NDIS_STATUS_SUCCESS
        && status == NDIS_STATUS_RESOURCES)) {
      NdisMIndicateReceivePacket(xi->adapter_handle, packets, packet_count);
      packet_count = 0;
    }
  }
  /* now return the packets for which we indicated NDIS_STATUS_RESOURCES */
  while (first_header_only_packet) {
    PNDIS_PACKET packet = first_header_only_packet;
    first_header_only_packet = PACKET_NEXT_PACKET(packet);
    XenNet_ReturnPacket(xi, packet);
  }
#else
  if (rc.first_nbl) {
    NdisMIndicateReceiveNetBufferLists(xi->adapter_handle, rc.first_nbl,
      NDIS_DEFAULT_PORT_NUMBER, rc.nbl_count,
      NDIS_RECEIVE_FLAGS_DISPATCH_LEVEL
      //| NDIS_RECEIVE_FLAGS_SINGLE_ETHER_TYPE
      | NDIS_RECEIVE_FLAGS_PERFECT_FILTERED);
  }
#endif
  //FUNCTION_EXIT();
  return dont_set_event;
}
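
/* RxBufferCheck is deliberately two-phase: it drains the shared ring into
   a private list while holding rx_lock, then releases the lock before the
   expensive packet construction and indication. A compact user-space
   sketch of that drain-under-lock / process-unlocked shape (pthreads and
   a linked list stand in for the spinlock and the ring; illustrative only): */

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct item { struct item *next; } item_t;

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static item_t *shared_ring;  /* stand-in for buffers sitting on the rx ring */

static void process(item_t *it) { (void)it; /* build and indicate a packet here */ }

static void rx_check(void) {
  item_t *head, *it;

  /* phase 1: detach everything from the shared structure while holding the lock */
  pthread_mutex_lock(&ring_lock);
  head = shared_ring;
  shared_ring = NULL;
  pthread_mutex_unlock(&ring_lock);

  /* phase 2: the slow per-packet work runs with the lock released */
  while ((it = head) != NULL) {
    head = it->next;
    process(it);
    free(it);
  }
}

int main(void) {
  int i;
  for (i = 0; i < 4; i++) {    /* queue a few fake responses */
    item_t *it = malloc(sizeof(*it));
    it->next = shared_ring;
    shared_ring = it;
  }
  rx_check();
  puts("ring drained under the lock, processed outside it");
  return 0;
}
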
static VOID
XenNet_BufferFree(xennet_info_t *xi)
{
  shared_buffer_t *sb;
  int i;

  for (i = 0; i < NET_RX_RING_SIZE; i++) {
    if (xi->rx_ring_pbs[i] != NULL) {
      put_pb_on_freelist(xi, xi->rx_ring_pbs[i]);
      xi->rx_ring_pbs[i] = NULL;
    }
  }

  /* because we are shutting down this won't allocate new ones */
  while ((sb = get_pb_from_freelist(xi)) != NULL) {
    XnEndAccess(xi->handle,
      sb->gref, FALSE, (ULONG)'XNRX');
    IoFreeMdl(sb->mdl);
    ExFreePoolWithTag(sb->virtual, XENNET_POOL_TAG);
    ExFreePoolWithTag(sb, XENNET_POOL_TAG);
  }
  while ((sb = get_hb_from_freelist(xi)) != NULL) {
    IoFreeMdl(sb->mdl);
    ExFreePoolWithTag(sb, XENNET_POOL_TAG);
  }
}
BOOLEAN
XenNet_RxInit(xennet_info_t *xi) {
#if NTDDI_VERSION < NTDDI_VISTA
  NDIS_STATUS status;
#else
  NET_BUFFER_LIST_POOL_PARAMETERS nbl_pool_parameters;
  NET_BUFFER_POOL_PARAMETERS nb_pool_parameters;
#endif
  int ret;
  int i;

  FUNCTION_ENTER();

  // this stuff needs to be done once only...
  KeInitializeSpinLock(&xi->rx_lock);
  KeInitializeEvent(&xi->rx_idle_event, SynchronizationEvent, FALSE);
  xi->rxpi = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(packet_info_t) * NdisSystemProcessorCount(), XENNET_POOL_TAG, NormalPoolPriority);
  if (!xi->rxpi) {
    FUNCTION_MSG("ExAllocatePoolWithTagPriority failed\n");
    return FALSE;
  }
  NdisZeroMemory(xi->rxpi, sizeof(packet_info_t) * NdisSystemProcessorCount());

  ret = stack_new(&xi->rx_pb_stack, NET_RX_RING_SIZE * 4);
  if (!ret) {
    FUNCTION_MSG("Failed to allocate rx_pb_stack\n");
    ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);
    return FALSE;
  }
  ret = stack_new(&xi->rx_hb_stack, NET_RX_RING_SIZE * 4);
  if (!ret) {
    FUNCTION_MSG("Failed to allocate rx_hb_stack\n");
    stack_delete(xi->rx_pb_stack, NULL, NULL);
    ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);
    return FALSE;
  }

  xi->rx_id_free = NET_RX_RING_SIZE;
  xi->rx_outstanding = 0;

  for (i = 0; i < NET_RX_RING_SIZE; i++) {
    xi->rx_ring_pbs[i] = NULL;
  }

#if NTDDI_VERSION < NTDDI_VISTA
  NdisAllocatePacketPool(&status, &xi->rx_packet_pool, NET_RX_RING_SIZE * 4, PROTOCOL_RESERVED_SIZE_IN_PACKET);
  if (status != NDIS_STATUS_SUCCESS) {
    FUNCTION_MSG("NdisAllocatePacketPool failed with 0x%x\n", status);
    return FALSE;
  }
#else
  nbl_pool_parameters.Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
  nbl_pool_parameters.Header.Revision = NET_BUFFER_LIST_POOL_PARAMETERS_REVISION_1;
  nbl_pool_parameters.Header.Size = NDIS_SIZEOF_NET_BUFFER_LIST_POOL_PARAMETERS_REVISION_1;
  nbl_pool_parameters.ProtocolId = NDIS_PROTOCOL_ID_DEFAULT;
  nbl_pool_parameters.fAllocateNetBuffer = FALSE;
  nbl_pool_parameters.ContextSize = 0;
  nbl_pool_parameters.PoolTag = XENNET_POOL_TAG;
  nbl_pool_parameters.DataSize = 0; /* NET_BUFFERS are always allocated separately */

  xi->rx_nbl_pool = NdisAllocateNetBufferListPool(xi->adapter_handle, &nbl_pool_parameters);
  if (!xi->rx_nbl_pool) {
    FUNCTION_MSG("NdisAllocateNetBufferListPool failed\n");
    return FALSE;
  }

  nb_pool_parameters.Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
  nb_pool_parameters.Header.Revision = NET_BUFFER_POOL_PARAMETERS_REVISION_1;
  nb_pool_parameters.Header.Size = NDIS_SIZEOF_NET_BUFFER_POOL_PARAMETERS_REVISION_1;
  nb_pool_parameters.PoolTag = XENNET_POOL_TAG;
  nb_pool_parameters.DataSize = 0; /* the buffers come from the ring */
  xi->rx_packet_pool = NdisAllocateNetBufferPool(xi->adapter_handle, &nb_pool_parameters);
  if (!xi->rx_packet_pool) {
    FUNCTION_MSG("NdisAllocateNetBufferPool (rx_packet_pool) failed\n");
    return FALSE;
  }
#endif
  XenNet_FillRing(xi);

  FUNCTION_EXIT();

  return TRUE;
}
VOID
XenNet_RxShutdown(xennet_info_t *xi) {
  KIRQL old_irql;
  UNREFERENCED_PARAMETER(xi);

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->rx_lock, &old_irql);
  while (xi->rx_outstanding) {
    FUNCTION_MSG("Waiting for %d packets to be returned\n", xi->rx_outstanding);
    KeReleaseSpinLock(&xi->rx_lock, old_irql);
    KeWaitForSingleObject(&xi->rx_idle_event, Executive, KernelMode, FALSE, NULL);
    KeAcquireSpinLock(&xi->rx_lock, &old_irql);
  }
  KeReleaseSpinLock(&xi->rx_lock, old_irql);

  XenNet_BufferFree(xi);

  stack_delete(xi->rx_pb_stack, NULL, NULL);
  stack_delete(xi->rx_hb_stack, NULL, NULL);

  ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);

#if NTDDI_VERSION < NTDDI_VISTA
  NdisFreePacketPool(xi->rx_packet_pool);
#else
  NdisFreeNetBufferPool(xi->rx_packet_pool);
  NdisFreeNetBufferListPool(xi->rx_nbl_pool);
#endif

  FUNCTION_EXIT();
  return;
}
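
/* RxShutdown blocks until every indicated packet has come back: each
   return path decrements rx_outstanding and sets rx_idle_event once the
   count reaches zero while the device is no longer active. A user-space
   analogue of that drain-then-free pattern, with a pthread condition
   variable standing in for KeSetEvent/KeWaitForSingleObject (sketch only): */

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t idle = PTHREAD_COND_INITIALIZER;
static int outstanding = 3;  /* packets still owned by the stack */

/* models the return path (cf. XenNet_ReturnNetBufferLists) */
static void *return_packets(void *arg) {
  (void)arg;
  for (;;) {
    pthread_mutex_lock(&lock);
    if (outstanding == 0) { pthread_mutex_unlock(&lock); break; }
    if (--outstanding == 0)
      pthread_cond_signal(&idle);  /* cf. KeSetEvent(&xi->rx_idle_event, ...) */
    pthread_mutex_unlock(&lock);
  }
  return NULL;
}

int main(void) {
  pthread_t t;
  pthread_create(&t, NULL, return_packets, NULL);

  /* shutdown path: wait until all outstanding packets are back (cf. XenNet_RxShutdown) */
  pthread_mutex_lock(&lock);
  while (outstanding)
    pthread_cond_wait(&idle, &lock);
  pthread_mutex_unlock(&lock);
  pthread_join(t, NULL);

  puts("all packets returned; safe to free rx resources");
  return 0;
}
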