win-pvdrivers

view xennet/xennet_rx.c @ 1070:05ece536b204

Fix LSO bug on FIN packets. Add RxCoalesce option (default on) to work around Cisco VPN issues
author James Harper <james.harper@bendigoit.com.au>
date Wed Nov 13 07:56:13 2013 +1100 (2013-11-13)
parents ebfa9417f1ee
children 5fa56ef930bf
/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "xennet.h"
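
/*
  Editor's note on the receive buffer scheme, as implemented below: buffers
  come from two freelists, both kept on stacks (stack_push/stack_pop).
  Page buffers ("pb") are single pages granted to the backend and posted on
  the rx ring; header buffers ("hb") hold the packet headers (and lookahead)
  when a received packet spans multiple pages. Page buffers are reference
  counted (ref_pb/put_pb_on_freelist) so a page is only freed or returned to
  the freelist once every packet referencing it has been returned.
*/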
static __inline shared_buffer_t *
get_pb_from_freelist(struct xennet_info *xi)
{
  shared_buffer_t *pb;
  PVOID ptr_ref;

  if (stack_pop(xi->rx_pb_stack, &ptr_ref))
  {
    pb = ptr_ref;
    pb->ref_count = 1;
    InterlockedDecrement(&xi->rx_pb_free);
    return pb;
  }

  /* don't allocate a new one if we are shutting down */
  if (xi->device_state != DEVICE_STATE_INITIALISING && xi->device_state != DEVICE_STATE_ACTIVE)
    return NULL;

  pb = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(shared_buffer_t), XENNET_POOL_TAG, LowPoolPriority);
  if (!pb)
    return NULL;
  pb->virtual = ExAllocatePoolWithTagPriority(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG, LowPoolPriority);
  if (!pb->virtual)
  {
    ExFreePoolWithTag(pb, XENNET_POOL_TAG);
    return NULL;
  }
  pb->mdl = IoAllocateMdl(pb->virtual, PAGE_SIZE, FALSE, FALSE, NULL);
  if (!pb->mdl)
  {
    ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
    ExFreePoolWithTag(pb, XENNET_POOL_TAG);
    return NULL;
  }
  pb->gref = (grant_ref_t)XnGrantAccess(xi->handle,
      (ULONG)(MmGetPhysicalAddress(pb->virtual).QuadPart >> PAGE_SHIFT), FALSE, INVALID_GRANT_REF, (ULONG)'XNRX');
  if (pb->gref == INVALID_GRANT_REF)
  {
    IoFreeMdl(pb->mdl);
    ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
    ExFreePoolWithTag(pb, XENNET_POOL_TAG);
    return NULL;
  }
  MmBuildMdlForNonPagedPool(pb->mdl);
  pb->ref_count = 1;
  return pb;
}
static __inline VOID
ref_pb(struct xennet_info *xi, shared_buffer_t *pb)
{
  UNREFERENCED_PARAMETER(xi);
  InterlockedIncrement(&pb->ref_count);
}

static __inline VOID
put_pb_on_freelist(struct xennet_info *xi, shared_buffer_t *pb)
{
  if (InterlockedDecrement(&pb->ref_count) == 0)
  {
    //NdisAdjustBufferLength(pb->buffer, PAGE_SIZE);
    //NDIS_BUFFER_LINKAGE(pb->buffer) = NULL;
    if (xi->rx_pb_free > RX_MAX_PB_FREELIST)
    {
      XnEndAccess(xi->handle, pb->gref, FALSE, (ULONG)'XNRX');
      IoFreeMdl(pb->mdl);
      ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
      ExFreePoolWithTag(pb, XENNET_POOL_TAG);
      return;
    }
    pb->mdl->ByteCount = PAGE_SIZE;
    pb->mdl->Next = NULL;
    pb->next = NULL;
    stack_push(xi->rx_pb_stack, pb);
    InterlockedIncrement(&xi->rx_pb_free);
  }
}
static __inline shared_buffer_t *
get_hb_from_freelist(struct xennet_info *xi)
{
  shared_buffer_t *hb;
  PVOID ptr_ref;

  if (stack_pop(xi->rx_hb_stack, &ptr_ref))
  {
    hb = ptr_ref;
    InterlockedDecrement(&xi->rx_hb_free);
    return hb;
  }

  /* don't allocate a new one if we are shutting down */
  if (xi->device_state != DEVICE_STATE_INITIALISING && xi->device_state != DEVICE_STATE_ACTIVE)
    return NULL;

  hb = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(shared_buffer_t) + MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH, XENNET_POOL_TAG, LowPoolPriority);
  if (!hb)
    return NULL;
  NdisZeroMemory(hb, sizeof(shared_buffer_t));
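  /* the MDL maps the memory immediately following the shared_buffer_t header
     (hb + 1); the return path relies on this layout to recover the hb from
     the MDL's virtual address by stepping back one shared_buffer_t */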
  hb->mdl = IoAllocateMdl(hb + 1, MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH, FALSE, FALSE, NULL);
  if (!hb->mdl) {
    ExFreePoolWithTag(hb, XENNET_POOL_TAG);
    return NULL;
  }
  MmBuildMdlForNonPagedPool(hb->mdl);
  return hb;
}

static __inline VOID
put_hb_on_freelist(struct xennet_info *xi, shared_buffer_t *hb)
{
  XN_ASSERT(xi);
  /* restore the MDL to the length it was allocated with above; the
     shared_buffer_t header itself is not part of the mapped buffer */
  hb->mdl->ByteCount = MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH;
  hb->mdl->Next = NULL;
  hb->next = NULL;
  stack_push(xi->rx_hb_stack, hb);
  InterlockedIncrement(&xi->rx_hb_free);
}
// Called at DISPATCH_LEVEL with rx lock held
static VOID
XenNet_FillRing(struct xennet_info *xi)
{
  unsigned short id;
  shared_buffer_t *page_buf;
  ULONG i, notify;
  ULONG batch_target;
  RING_IDX req_prod = xi->rx_ring.req_prod_pvt;
  netif_rx_request_t *req;

  //FUNCTION_ENTER();

  if (xi->device_state != DEVICE_STATE_ACTIVE)
    return;

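  /* batch_target is the number of empty ring slots we could fill:
     rx_target less the requests already posted but not yet consumed */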
  batch_target = xi->rx_target - (req_prod - xi->rx_ring.rsp_cons);

  if (batch_target < (xi->rx_target >> 2)) {
    //FUNCTION_EXIT();
    return; /* only refill if we are less than 3/4 full already */
  }

  for (i = 0; i < batch_target; i++) {
    page_buf = get_pb_from_freelist(xi);
    if (!page_buf) {
      FUNCTION_MSG("Added %d out of %d buffers to rx ring (no free pages)\n", i, batch_target);
      break;
    }
    xi->rx_id_free--;

    /* Give to netback */
    id = (USHORT)((req_prod + i) & (NET_RX_RING_SIZE - 1));
    XN_ASSERT(xi->rx_ring_pbs[id] == NULL);
    xi->rx_ring_pbs[id] = page_buf;
    req = RING_GET_REQUEST(&xi->rx_ring, req_prod + i);
    req->id = id;
    req->gref = page_buf->gref;
    XN_ASSERT(req->gref != INVALID_GRANT_REF);
  }
  KeMemoryBarrier();
  xi->rx_ring.req_prod_pvt = req_prod + i;
  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->rx_ring, notify);
  if (notify) {
    XnNotify(xi->handle, xi->event_channel);
  }

  //FUNCTION_EXIT();
  return;
}
#if NTDDI_VERSION < NTDDI_VISTA
typedef struct {
  PNDIS_PACKET first_packet;
  PNDIS_PACKET last_packet;
  ULONG packet_count;
} rx_context_t;
#else
typedef struct {
  PNET_BUFFER_LIST first_nbl;
  PNET_BUFFER_LIST last_nbl;
  ULONG packet_count;
  ULONG nbl_count;
} rx_context_t;
#endif
#if NTDDI_VERSION < NTDDI_VISTA
/*
NDIS5 appears to insist that the checksum on received packets is correct, and won't
believe us when we lie about it, which happens when the packet is generated on the
same bridge in Dom0. Doh!
This is only for TCP and UDP packets. IP checksums appear to be correct anyway.
*/
static BOOLEAN
XenNet_SumPacketData(
  packet_info_t *pi,
  PNDIS_PACKET packet,
  BOOLEAN set_csum) {
  USHORT i;
  PUCHAR buffer;
  PMDL mdl;
  UINT total_length;
  UINT data_length;
  UINT buffer_length;
  USHORT buffer_offset;
  ULONG csum;
  PUSHORT csum_ptr;
  USHORT remaining;
  USHORT ip4_length;
  BOOLEAN csum_span = TRUE; /* when the USHORT to be checksummed spans a buffer */

  NdisGetFirstBufferFromPacketSafe(packet, &mdl, &buffer, &buffer_length, &total_length, NormalPagePriority);
  if (!buffer) {
    FUNCTION_MSG("NdisGetFirstBufferFromPacketSafe failed, buffer == NULL\n");
    return FALSE;
  }
  XN_ASSERT(mdl);

  ip4_length = GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 2]);
  data_length = ip4_length + XN_HDR_SIZE;

  if ((USHORT)data_length > total_length) {
    FUNCTION_MSG("Size Mismatch %d (ip4_length + XN_HDR_SIZE) != %d (total_length)\n", ip4_length + XN_HDR_SIZE, total_length);
    return FALSE;
  }

  switch (pi->ip_proto) {
  case 6:
    XN_ASSERT(buffer_length >= (USHORT)(XN_HDR_SIZE + pi->ip4_header_length + 17));
    csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + pi->ip4_header_length + 16];
    break;
  case 17:
    XN_ASSERT(buffer_length >= (USHORT)(XN_HDR_SIZE + pi->ip4_header_length + 7));
    csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + pi->ip4_header_length + 6];
    break;
  default:
    FUNCTION_MSG("Don't know how to calc sum for IP Proto %d\n", pi->ip_proto);
    //FUNCTION_EXIT();
    return FALSE; // should never happen
  }

  if (set_csum)
    *csum_ptr = 0;

  csum = 0;
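  /* sum the TCP/UDP pseudo-header: source address, destination address,
     protocol, and the TCP/UDP length (ip4_length less the IP header) */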
  csum += GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 12]) + GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 14]); // src
  csum += GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 16]) + GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 18]); // dst
  csum += ((USHORT)buffer[XN_HDR_SIZE + 9]); // protocol

  remaining = ip4_length - pi->ip4_header_length;
  csum += remaining;

  csum_span = FALSE;
  buffer_offset = i = XN_HDR_SIZE + pi->ip4_header_length;
  while (i < data_length) {
    /* don't include the checksum field itself in the calculation */
    if ((pi->ip_proto == 6 && i == XN_HDR_SIZE + pi->ip4_header_length + 16) || (pi->ip_proto == 17 && i == XN_HDR_SIZE + pi->ip4_header_length + 6)) {
      /* we know that this always happens in the header buffer so we are guaranteed the full two bytes */
      i += 2;
      buffer_offset += 2;
      continue;
    }
    if (csum_span) {
      /* the other half of the next bit */
      XN_ASSERT(buffer_offset == 0);
      csum += (USHORT)buffer[buffer_offset];
      csum_span = FALSE;
      i += 1;
      buffer_offset += 1;
    } else if (buffer_offset == buffer_length - 1) {
      /* deal with a buffer ending on an odd byte boundary */
      csum += (USHORT)buffer[buffer_offset] << 8;
      csum_span = TRUE;
      i += 1;
      buffer_offset += 1;
    } else {
      csum += GET_NET_PUSHORT(&buffer[buffer_offset]);
      i += 2;
      buffer_offset += 2;
    }
    if (buffer_offset == buffer_length && i < total_length) {
      NdisGetNextBuffer(mdl, &mdl);
      if (mdl == NULL) {
        FUNCTION_MSG(__DRIVER_NAME " Ran out of buffers\n");
        return FALSE; // should never happen
      }
      NdisQueryBufferSafe(mdl, &buffer, &buffer_length, NormalPagePriority);
      XN_ASSERT(buffer_length);
      buffer_offset = 0;
    }
  }
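  /* fold the 32-bit accumulator into 16 bits (one's-complement addition) */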
  while (csum & 0xFFFF0000)
    csum = (csum & 0xFFFF) + (csum >> 16);

  if (set_csum) {
    *csum_ptr = (USHORT)~GET_NET_USHORT((USHORT)csum);
  } else {
    return (BOOLEAN)(*csum_ptr == (USHORT)~GET_NET_USHORT((USHORT)csum));
  }
  return TRUE;
}
#endif
static BOOLEAN
XenNet_MakePacket(struct xennet_info *xi, rx_context_t *rc, packet_info_t *pi) {
#if NTDDI_VERSION < NTDDI_VISTA
  NDIS_STATUS status;
  PNDIS_PACKET packet;
#else
  PNET_BUFFER_LIST nbl;
  PNET_BUFFER packet;
#endif
  PMDL mdl_head, mdl_tail, curr_mdl;
  PUCHAR header_va;
  ULONG out_remaining;
  ULONG header_extra;
  shared_buffer_t *header_buf;
  ULONG outstanding;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  //UINT packet_length;
#else
  NDIS_TCP_IP_CHECKSUM_NET_BUFFER_LIST_INFO csum_info;
#endif
  //FUNCTION_ENTER();

#if NTDDI_VERSION < NTDDI_VISTA
  NdisAllocatePacket(&status, &packet, xi->rx_packet_pool);
  if (status != NDIS_STATUS_SUCCESS) {
    FUNCTION_MSG("No free packets\n");
    return FALSE;
  }

  NdisZeroMemory(packet->MiniportReservedEx, sizeof(packet->MiniportReservedEx));
  NDIS_SET_PACKET_HEADER_SIZE(packet, XN_HDR_SIZE);
#else
  nbl = NdisAllocateNetBufferList(xi->rx_nbl_pool, 0, 0);
  if (!nbl) {
    /* buffers will be freed in MakePackets */
    FUNCTION_MSG("No free nbls\n");
    //FUNCTION_EXIT();
    return FALSE;
  }

  packet = NdisAllocateNetBuffer(xi->rx_packet_pool, NULL, 0, 0);
  if (!packet) {
    FUNCTION_MSG("No free packets\n");
    NdisFreeNetBufferList(nbl);
    //FUNCTION_EXIT();
    return FALSE;
  }
#endif

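  /* a single-buffer packet is indicated from the page buffer directly; the
     RxCoalesce option added in this changeset extends that to any packet
     <= PAGE_SIZE by pulling the whole packet into the first buffer, which
     (per the commit message above) works around Cisco VPN issues */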
  if ((!pi->first_mdl->Next || (xi->config_rx_coalesce && pi->total_length <= PAGE_SIZE)) && !pi->split_required) {
    /* a single buffer <= MTU */
    header_buf = NULL;
    /* get all the packet into the header */
    XenNet_BuildHeader(pi, pi->first_mdl_virtual, PAGE_SIZE);
#if NTDDI_VERSION < NTDDI_VISTA
    NdisChainBufferAtBack(packet, pi->first_mdl);
    PACKET_FIRST_PB(packet) = pi->first_pb;
#else
    NET_BUFFER_FIRST_MDL(packet) = pi->first_mdl;
    NET_BUFFER_CURRENT_MDL(packet) = pi->first_mdl;
    NET_BUFFER_CURRENT_MDL_OFFSET(packet) = 0;
    NET_BUFFER_DATA_OFFSET(packet) = 0;
    NET_BUFFER_DATA_LENGTH(packet) = pi->total_length;
    NB_FIRST_PB(packet) = pi->first_pb;
#endif
    ref_pb(xi, pi->first_pb);
  } else {
    XN_ASSERT(ndis_os_minor_version >= 1);
    header_buf = get_hb_from_freelist(xi);
    if (!header_buf) {
      FUNCTION_MSG("No free header buffers\n");
#if NTDDI_VERSION < NTDDI_VISTA
      NdisUnchainBufferAtFront(packet, &curr_mdl);
      NdisFreePacket(packet);
#else
      NdisFreeNetBufferList(nbl);
      NdisFreeNetBuffer(packet);
#endif
      return FALSE;
    }
    header_va = (PUCHAR)(header_buf + 1);
    NdisMoveMemory(header_va, pi->header, pi->header_length);
    //if (pi->ip_proto == 50) {
    //  FUNCTION_MSG("header_length = %d, current_lookahead = %d\n", pi->header_length, xi->current_lookahead);
    //  FUNCTION_MSG("ip4_header_length = %d\n", pi->ip4_header_length);
    //  FUNCTION_MSG("tcp_header_length = %d\n", pi->tcp_header_length);
    //}
    /* make sure only the header is in the first buffer (or the entire packet, but that is done in the above case) */
    XenNet_BuildHeader(pi, header_va, MAX_ETH_HEADER_LENGTH + pi->ip4_header_length + pi->tcp_header_length);
    header_extra = pi->header_length - (MAX_ETH_HEADER_LENGTH + pi->ip4_header_length + pi->tcp_header_length);
    XN_ASSERT(pi->header_length <= MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH);
    header_buf->mdl->ByteCount = pi->header_length;
    mdl_head = mdl_tail = curr_mdl = header_buf->mdl;
#if NTDDI_VERSION < NTDDI_VISTA
    PACKET_FIRST_PB(packet) = header_buf;
    header_buf->next = pi->curr_pb;
    NdisChainBufferAtBack(packet, mdl_head);
#else
    NB_FIRST_PB(packet) = header_buf;
    header_buf->next = pi->curr_pb;
    NET_BUFFER_FIRST_MDL(packet) = mdl_head;
    NET_BUFFER_CURRENT_MDL(packet) = mdl_head;
    NET_BUFFER_CURRENT_MDL_OFFSET(packet) = 0;
    NET_BUFFER_DATA_OFFSET(packet) = 0;
    NET_BUFFER_DATA_LENGTH(packet) = pi->header_length;
#endif

    if (pi->split_required) {
      /* must be ip4 */
      ULONG tcp_length;
      USHORT new_ip4_length;
      tcp_length = (USHORT)min(pi->mss, pi->tcp_remaining);
      new_ip4_length = (USHORT)(pi->ip4_header_length + pi->tcp_header_length + tcp_length);
      SET_NET_USHORT(&header_va[XN_HDR_SIZE + 2], new_ip4_length);
      SET_NET_ULONG(&header_va[XN_HDR_SIZE + pi->ip4_header_length + 4], pi->tcp_seq);
      pi->tcp_seq += tcp_length;
      pi->tcp_remaining = (USHORT)(pi->tcp_remaining - tcp_length);
      /* part of the packet is already present in the header buffer for lookahead */
      out_remaining = tcp_length - header_extra;
      XN_ASSERT((LONG)out_remaining >= 0);
    } else {
      out_remaining = pi->total_length - pi->header_length;
      XN_ASSERT((LONG)out_remaining >= 0);
    }

    while (out_remaining != 0) {
      //ULONG in_buffer_offset;
      ULONG in_buffer_length;
      ULONG out_length;

      //if (pi->ip_proto == 50) {
      //  FUNCTION_MSG("in loop - out_remaining = %d, curr_buffer = %p, curr_pb = %p\n", out_remaining, pi->curr_mdl, pi->curr_pb);
      //}
      if (!pi->curr_mdl || !pi->curr_pb) {
        FUNCTION_MSG("out of buffers for packet\n");
        //KdPrint((__DRIVER_NAME " out_remaining = %d, curr_buffer = %p, curr_pb = %p\n", out_remaining, pi->curr_mdl, pi->curr_pb));
        // TODO: free some stuff or we'll leak
        /* unchain buffers then free packet */
        //FUNCTION_EXIT();
        return FALSE;
      }

      in_buffer_length = MmGetMdlByteCount(pi->curr_mdl);
      out_length = min(out_remaining, in_buffer_length - pi->curr_mdl_offset);
      curr_mdl = IoAllocateMdl((PUCHAR)MmGetMdlVirtualAddress(pi->curr_mdl) + pi->curr_mdl_offset, out_length, FALSE, FALSE, NULL);
      XN_ASSERT(curr_mdl);
      IoBuildPartialMdl(pi->curr_mdl, curr_mdl, (PUCHAR)MmGetMdlVirtualAddress(pi->curr_mdl) + pi->curr_mdl_offset, out_length);
      mdl_tail->Next = curr_mdl;
      mdl_tail = curr_mdl;
      curr_mdl->Next = NULL; /* I think this might be redundant */
#if NTDDI_VERSION < NTDDI_VISTA
#else
      NET_BUFFER_DATA_LENGTH(packet) += out_length;
#endif
      ref_pb(xi, pi->curr_pb);
      pi->curr_mdl_offset = (USHORT)(pi->curr_mdl_offset + out_length);
      if (pi->curr_mdl_offset == in_buffer_length) {
        pi->curr_mdl = pi->curr_mdl->Next;
        pi->curr_pb = pi->curr_pb->next;
        pi->curr_mdl_offset = 0;
      }
      out_remaining -= out_length;
    }
#if NTDDI_VERSION < NTDDI_VISTA
    if (pi->split_required) {
      // TODO: only if Ip checksum is disabled...
      XenNet_SumIpHeader(header_va, pi->ip4_header_length);
    }
#endif
    if (header_extra > 0)
      pi->header_length -= header_extra;
  }

  rc->packet_count++;
#if NTDDI_VERSION < NTDDI_VISTA
#else
  NET_BUFFER_LIST_FIRST_NB(nbl) = packet;
#endif

  if (pi->parse_result == PARSE_OK) {
#if NTDDI_VERSION < NTDDI_VISTA
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    XN_ASSERT(csum_info->Value == 0);
    if (pi->csum_blank || pi->data_validated || pi->split_required) {
      BOOLEAN checksum_offload = FALSE;
      /* we know this is IPv4, and we know Linux always validates the IPv4 checksum for us */
      if (xi->setting_csum.V4Receive.IpChecksum) {
        if (!pi->ip_has_options || xi->setting_csum.V4Receive.IpOptionsSupported) {
          if (XenNet_CheckIpHeaderSum(pi->header, pi->ip4_header_length))
            csum_info->Receive.NdisPacketIpChecksumSucceeded = TRUE;
          else
            csum_info->Receive.NdisPacketIpChecksumFailed = TRUE;
        }
      }
      if (xi->setting_csum.V4Receive.TcpChecksum && pi->ip_proto == 6) {
        if (!pi->tcp_has_options || xi->setting_csum.V4Receive.TcpOptionsSupported) {
          csum_info->Receive.NdisPacketTcpChecksumSucceeded = TRUE;
          checksum_offload = TRUE;
        }
      } else if (xi->setting_csum.V4Receive.UdpChecksum && pi->ip_proto == 17) {
        csum_info->Receive.NdisPacketUdpChecksumSucceeded = TRUE;
        checksum_offload = TRUE;
      }
      if (pi->csum_blank && (!xi->config_csum_rx_dont_fix || !checksum_offload)) {
        XenNet_SumPacketData(pi, packet, TRUE);
      }
    } else if (xi->config_csum_rx_check && pi->ip_version == 4) {
      if (xi->setting_csum.V4Receive.IpChecksum) {
        if (!pi->ip_has_options || xi->setting_csum.V4Receive.IpOptionsSupported) {
          if (XenNet_CheckIpHeaderSum(pi->header, pi->ip4_header_length))
            csum_info->Receive.NdisPacketIpChecksumSucceeded = TRUE;
          else
            csum_info->Receive.NdisPacketIpChecksumFailed = TRUE;
        }
      }
      if (xi->setting_csum.V4Receive.TcpChecksum && pi->ip_proto == 6) {
        if (!pi->tcp_has_options || xi->setting_csum.V4Receive.TcpOptionsSupported) {
          if (XenNet_SumPacketData(pi, packet, FALSE)) {
            csum_info->Receive.NdisPacketTcpChecksumSucceeded = TRUE;
          } else {
            csum_info->Receive.NdisPacketTcpChecksumFailed = TRUE;
          }
        }
      } else if (xi->setting_csum.V4Receive.UdpChecksum && pi->ip_proto == 17) {
        if (XenNet_SumPacketData(pi, packet, FALSE)) {
          csum_info->Receive.NdisPacketUdpChecksumSucceeded = TRUE;
        } else {
          csum_info->Receive.NdisPacketUdpChecksumFailed = TRUE;
        }
      }
    }
#else
    csum_info.Value = 0;
    if (pi->csum_blank || pi->data_validated || pi->mss) {
      if (pi->ip_proto == 6) {
        csum_info.Receive.IpChecksumSucceeded = TRUE;
        csum_info.Receive.TcpChecksumSucceeded = TRUE;
      } else if (pi->ip_proto == 17) {
        csum_info.Receive.IpChecksumSucceeded = TRUE;
        csum_info.Receive.UdpChecksumSucceeded = TRUE;
      }
    }
    NET_BUFFER_LIST_INFO(nbl, TcpIpChecksumNetBufferListInfo) = csum_info.Value;
#endif
  }

#if NTDDI_VERSION < NTDDI_VISTA
  if (!rc->first_packet) {
    rc->first_packet = packet;
  } else {
    PACKET_NEXT_PACKET(rc->last_packet) = packet;
  }
  rc->last_packet = packet;
  /* rc->packet_count was already incremented above for both code paths */
#else
  if (!rc->first_nbl) {
    rc->first_nbl = nbl;
  } else {
    NET_BUFFER_LIST_NEXT_NBL(rc->last_nbl) = nbl;
  }
  rc->last_nbl = nbl;
  NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL;
  rc->nbl_count++;
  if (pi->is_multicast) {
    /* multicast */
    xi->stats.ifHCInMulticastPkts++;
    xi->stats.ifHCInMulticastOctets += NET_BUFFER_DATA_LENGTH(packet);
  } else if (pi->is_broadcast) {
    /* broadcast */
    xi->stats.ifHCInBroadcastPkts++;
    xi->stats.ifHCInBroadcastOctets += NET_BUFFER_DATA_LENGTH(packet);
  } else {
    /* unicast */
    xi->stats.ifHCInUcastPkts++;
    xi->stats.ifHCInUcastOctets += NET_BUFFER_DATA_LENGTH(packet);
  }
#endif

  outstanding = InterlockedIncrement(&xi->rx_outstanding);
#if NTDDI_VERSION < NTDDI_VISTA
  if (outstanding > RX_PACKET_HIGH_WATER_MARK || !xi->rx_pb_free) {
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_RESOURCES);
  } else {
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
  }
#if 0
  /* windows gets lazy about ack packets and holds on to them forever under high load situations. we don't like this */
  NdisQueryPacketLength(packet, &packet_length);
  if (pi->parse_result != PARSE_OK || (pi->ip_proto == 6 && packet_length <= NDIS_STATUS_RESOURCES_MAX_LENGTH))
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_RESOURCES);
  else
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
#endif
#endif

  //FUNCTION_EXIT();
  return TRUE;
}
static VOID
XenNet_MakePackets(struct xennet_info *xi, rx_context_t *rc, packet_info_t *pi)
{
  UCHAR tcp_flags;
  shared_buffer_t *page_buf;

  XenNet_ParsePacketHeader(pi, NULL, XN_HDR_SIZE + xi->current_lookahead);

  if (!XenNet_FilterAcceptPacket(xi, pi)) {
    goto done;
  }

  if (pi->split_required) {
#if NTDDI_VERSION < NTDDI_VISTA
    /* need to split to mss for NDIS5 */
#else
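    /* NDIS6 can take the large packet with less splitting: RX_LSO_SPLIT_HALF
       raises the effective mss to half the TCP payload so at most two packets
       result; RX_LSO_SPLIT_NONE sets mss to 65535 so no split happens at all */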
    switch (xi->current_gso_rx_split_type) {
    case RX_LSO_SPLIT_HALF:
      pi->mss = max((pi->tcp_length + 1) / 2, pi->mss);
      break;
    case RX_LSO_SPLIT_NONE:
      pi->mss = 65535;
      break;
    }
#endif
  }

  switch (pi->ip_proto) {
  case 6:  // TCP
    if (pi->split_required)
      break;
    /* fall through */
  case 17:  // UDP
    if (!XenNet_MakePacket(xi, rc, pi)) {
      FUNCTION_MSG("Failed to make packet\n");
#if NTDDI_VERSION < NTDDI_VISTA
      xi->stat_rx_no_buffer++;
#else
      xi->stats.ifInDiscards++;
#endif
      goto done;
    }
    goto done;
  default:
    if (!XenNet_MakePacket(xi, rc, pi)) {
      FUNCTION_MSG("Failed to make packet\n");
#if NTDDI_VERSION < NTDDI_VISTA
      xi->stat_rx_no_buffer++;
#else
      xi->stats.ifInDiscards++;
#endif
      goto done;
    }
    goto done;
  }

  /* this is the split_required code */
  pi->tcp_remaining = pi->tcp_length;

  /* we can make certain assumptions here as the following code is only for tcp4 */
  tcp_flags = pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13];
  /* clear all tcp flags except ack, except for the last packet */
  pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] &= 0x10;
  while (pi->tcp_remaining) {
    if (pi->tcp_remaining <= pi->mss) {
      /* restore tcp flags for the last packet */
      pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] = tcp_flags;
    }
    if (!XenNet_MakePacket(xi, rc, pi)) {
      FUNCTION_MSG("Failed to make packet\n");
#if NTDDI_VERSION < NTDDI_VISTA
      xi->stat_rx_no_buffer++;
#else
      xi->stats.ifInDiscards++;
#endif
      break; /* we are out of memory - just drop the packets */
    }
  }
done:
  page_buf = pi->first_pb;
  while (page_buf) {
    shared_buffer_t *next_pb = page_buf->next;
    put_pb_on_freelist(xi, page_buf); /* this doesn't actually free the page_buf if there are outstanding references */
    page_buf = next_pb;
  }
  XenNet_ClearPacketInfo(pi);
  //FUNCTION_EXIT();
  return;
}
#if NTDDI_VERSION < NTDDI_VISTA
/* called at DISPATCH_LEVEL */
/* it's okay for return packet to be called while resume_state != RUNNING as the packet will simply be added back to the freelist, the grants will be fixed later */
VOID
XenNet_ReturnPacket(NDIS_HANDLE adapter_context, PNDIS_PACKET packet) {
  struct xennet_info *xi = adapter_context;
  PNDIS_BUFFER buffer;
  shared_buffer_t *page_buf = PACKET_FIRST_PB(packet);

  //FUNCTION_ENTER();

  NdisUnchainBufferAtFront(packet, &buffer);

  while (buffer) {
    shared_buffer_t *next_buf;
    XN_ASSERT(page_buf);
    next_buf = page_buf->next;
    if (!page_buf->virtual) {
      /* this is a hb not a pb because virtual is NULL (virtual is just the memory after the hb) */
      put_hb_on_freelist(xi, (shared_buffer_t *)MmGetMdlVirtualAddress(buffer) - 1);
    } else {
      if (buffer != page_buf->mdl)
        NdisFreeBuffer(buffer);
      put_pb_on_freelist(xi, page_buf);
    }
    NdisUnchainBufferAtFront(packet, &buffer);
    page_buf = next_buf;
  }

  NdisFreePacket(packet);
  InterlockedDecrement(&xi->rx_outstanding);
  if (!xi->rx_outstanding && xi->device_state != DEVICE_STATE_ACTIVE)
    KeSetEvent(&xi->rx_idle_event, IO_NO_INCREMENT, FALSE);
  //FUNCTION_EXIT();
}
#else
/* called at <= DISPATCH_LEVEL */
/* it's okay for return packet to be called while resume_state != RUNNING as the packet will simply be added back to the freelist, the grants will be fixed later */
VOID
XenNet_ReturnNetBufferLists(NDIS_HANDLE adapter_context, PNET_BUFFER_LIST curr_nbl, ULONG return_flags)
{
  struct xennet_info *xi = adapter_context;
  UNREFERENCED_PARAMETER(return_flags);

  //FUNCTION_ENTER();

  //KdPrint((__DRIVER_NAME " page_buf = %p\n", page_buf));

  XN_ASSERT(xi);
  while (curr_nbl)
  {
    PNET_BUFFER_LIST next_nbl;
    PNET_BUFFER curr_nb;

    next_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl);
    curr_nb = NET_BUFFER_LIST_FIRST_NB(curr_nbl);
    while (curr_nb)
    {
      PNET_BUFFER next_nb;
      PMDL curr_mdl;
      shared_buffer_t *page_buf;

      next_nb = NET_BUFFER_NEXT_NB(curr_nb);
      curr_mdl = NET_BUFFER_FIRST_MDL(curr_nb);
      page_buf = NB_FIRST_PB(curr_nb);
      while (curr_mdl)
      {
        shared_buffer_t *next_buf;
        PMDL next_mdl;

        XN_ASSERT(page_buf); /* make sure that there is a pb to match this mdl */
        next_mdl = curr_mdl->Next;
        next_buf = page_buf->next;
        if (!page_buf->virtual)
        {
          /* this is a hb not a pb because virtual is NULL (virtual is just the memory after the hb) */
          put_hb_on_freelist(xi, (shared_buffer_t *)MmGetMdlVirtualAddress(curr_mdl) - 1);
        }
        else
        {
          //KdPrint((__DRIVER_NAME " returning page_buf %p with id %d\n", page_buf, page_buf->id));
          if (curr_mdl != page_buf->mdl)
          {
            //KdPrint((__DRIVER_NAME " curr_mdl = %p, page_buf->mdl = %p\n", curr_mdl, page_buf->mdl));
            IoFreeMdl(curr_mdl);
          }
          put_pb_on_freelist(xi, page_buf);
        }
        curr_mdl = next_mdl;
        page_buf = next_buf;
      }

      NdisFreeNetBuffer(curr_nb);
      InterlockedDecrement(&xi->rx_outstanding);

      curr_nb = next_nb;
    }
    NdisFreeNetBufferList(curr_nbl);
    curr_nbl = next_nbl;
  }

  if (!xi->rx_outstanding && xi->device_state != DEVICE_STATE_ACTIVE)
    KeSetEvent(&xi->rx_idle_event, IO_NO_INCREMENT, FALSE);

  //FUNCTION_EXIT();
}
#endif
/* We limit the number of packets per interrupt so that acks get a chance
   under high rx load. The DPC is immediately re-scheduled */

#define MAXIMUM_PACKETS_PER_INDICATE 32

#define MAXIMUM_PACKETS_PER_INTERRUPT 2560 /* this is calculated before large packet split */
#define MAXIMUM_DATA_PER_INTERRUPT (MAXIMUM_PACKETS_PER_INTERRUPT * 1500) /* help account for large packets */
// Called at DISPATCH_LEVEL
BOOLEAN
XenNet_RxBufferCheck(struct xennet_info *xi)
{
  RING_IDX cons, prod;
  ULONG packet_count = 0;
  ULONG packet_data = 0;
  ULONG buffer_count = 0;
  USHORT id;
  int more_to_do = FALSE;
  shared_buffer_t *page_buf;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packets[MAXIMUM_PACKETS_PER_INDICATE];
  PNDIS_PACKET first_header_only_packet;
  PNDIS_PACKET last_header_only_packet;
#else
#endif
  //ULONG nbl_count = 0;
  ULONG interim_packet_data = 0;
  struct netif_extra_info *ei;
  rx_context_t rc;
  packet_info_t *pi = &xi->rxpi[KeGetCurrentProcessorNumber() & 0xff];
  shared_buffer_t *head_buf = NULL;
  shared_buffer_t *tail_buf = NULL;
  shared_buffer_t *last_buf = NULL;
  BOOLEAN extra_info_flag = FALSE;
  BOOLEAN more_data_flag = FALSE;
  BOOLEAN dont_set_event;
  //FUNCTION_ENTER();

#if NTDDI_VERSION < NTDDI_VISTA
  rc.first_packet = NULL;
  rc.last_packet = NULL;
  rc.packet_count = 0;
#else
  rc.first_nbl = NULL;
  rc.last_nbl = NULL;
  rc.packet_count = 0;
  rc.nbl_count = 0;
#endif

  /* get all the buffers off the ring as quickly as possible so the lock is held for a minimum amount of time */
  KeAcquireSpinLockAtDpcLevel(&xi->rx_lock);

  if (xi->device_state != DEVICE_STATE_ACTIVE) {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);
    return FALSE;
  }

  if (xi->rx_partial_buf) {
    head_buf = xi->rx_partial_buf;
    tail_buf = xi->rx_partial_buf;
    while (tail_buf->next)
      tail_buf = tail_buf->next;
    more_data_flag = xi->rx_partial_more_data_flag;
    extra_info_flag = xi->rx_partial_extra_info_flag;
    xi->rx_partial_buf = NULL;
  }

  do {
    prod = xi->rx_ring.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'prod'. */

    for (cons = xi->rx_ring.rsp_cons; cons != prod && packet_count < MAXIMUM_PACKETS_PER_INTERRUPT && packet_data < MAXIMUM_DATA_PER_INTERRUPT; cons++) {
      id = (USHORT)(cons & (NET_RX_RING_SIZE - 1));
      page_buf = xi->rx_ring_pbs[id];
      XN_ASSERT(page_buf);
      xi->rx_ring_pbs[id] = NULL;
      xi->rx_id_free++;
      memcpy(&page_buf->rsp, RING_GET_RESPONSE(&xi->rx_ring, cons), max(sizeof(struct netif_rx_response), sizeof(struct netif_extra_info)));
      if (!extra_info_flag) {
        if (page_buf->rsp.status <= 0 || page_buf->rsp.offset + page_buf->rsp.status > PAGE_SIZE) {
          FUNCTION_MSG("Error: rsp offset %d, size %d\n",
            page_buf->rsp.offset, page_buf->rsp.status);
          XN_ASSERT(!extra_info_flag);
          put_pb_on_freelist(xi, page_buf);
          continue;
        }
      }

      if (!head_buf) {
        head_buf = page_buf;
        tail_buf = page_buf;
      } else {
        tail_buf->next = page_buf;
        tail_buf = page_buf;
      }
      page_buf->next = NULL;

      if (extra_info_flag) {
        ei = (struct netif_extra_info *)&page_buf->rsp;
        extra_info_flag = ei->flags & XEN_NETIF_EXTRA_FLAG_MORE;
      } else {
        more_data_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_more_data);
        extra_info_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_extra_info);
        interim_packet_data += page_buf->rsp.status;
      }

      if (!extra_info_flag && !more_data_flag) {
        last_buf = page_buf;
        packet_count++;
        packet_data += interim_packet_data;
        interim_packet_data = 0;
      }
      buffer_count++;
    }
    xi->rx_ring.rsp_cons = cons;

    /* Give netback more buffers */
    XenNet_FillRing(xi);

    if (packet_count >= MAXIMUM_PACKETS_PER_INTERRUPT || packet_data >= MAXIMUM_DATA_PER_INTERRUPT)
      break;

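    /* re-check for responses after setting rsp_event, so that a response
       the backend produces in this window still raises an event */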
    more_to_do = RING_HAS_UNCONSUMED_RESPONSES(&xi->rx_ring);
    if (!more_to_do) {
      xi->rx_ring.sring->rsp_event = xi->rx_ring.rsp_cons + 1;
      KeMemoryBarrier();
      more_to_do = RING_HAS_UNCONSUMED_RESPONSES(&xi->rx_ring);
    }
  } while (more_to_do);

  /* anything past last_buf belongs to an incomplete packet... */
  if (last_buf && last_buf->next)
  {
    FUNCTION_MSG("Partial receive\n");
    xi->rx_partial_buf = last_buf->next;
    xi->rx_partial_more_data_flag = more_data_flag;
    xi->rx_partial_extra_info_flag = extra_info_flag;
    last_buf->next = NULL;
  }

  KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);

  if (packet_count >= MAXIMUM_PACKETS_PER_INTERRUPT || packet_data >= MAXIMUM_DATA_PER_INTERRUPT)
  {
    /* fire again immediately */
    FUNCTION_MSG("Dpc Duration Exceeded\n");
    /* we want the Dpc on the end of the queue. By definition we are already on the right CPU so we know the Dpc queue will be run immediately */
    //KeSetImportanceDpc(&xi->rxtx_dpc, MediumImportance);
    KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
    /* dont set an event in TX path */
    dont_set_event = TRUE;
  }
  else
  {
    /* make sure the Dpc queue is run immediately next interrupt */
    //KeSetImportanceDpc(&xi->rxtx_dpc, HighImportance);
    /* set an event in TX path */
    dont_set_event = FALSE;
  }

  /* make packets out of the buffers */
  page_buf = head_buf;
  extra_info_flag = FALSE;
  more_data_flag = FALSE;

  while (page_buf) {
    shared_buffer_t *next_buf = page_buf->next;
    PMDL mdl;

    page_buf->next = NULL;
    if (extra_info_flag) {
      //KdPrint((__DRIVER_NAME " processing extra info\n"));
      ei = (struct netif_extra_info *)&page_buf->rsp;
      extra_info_flag = ei->flags & XEN_NETIF_EXTRA_FLAG_MORE;
      switch (ei->type)
      {
      case XEN_NETIF_EXTRA_TYPE_GSO:
        switch (ei->u.gso.type) {
        case XEN_NETIF_GSO_TYPE_TCPV4:
          pi->mss = ei->u.gso.size;
          // TODO - put this assertion somewhere XN_ASSERT(header_len + pi->mss <= PAGE_SIZE); // this limits MTU to PAGE_SIZE - XN_HEADER_LEN
          break;
        default:
          FUNCTION_MSG("Unknown GSO type (%d) detected\n", ei->u.gso.type);
          break;
        }
        break;
      default:
        FUNCTION_MSG("Unknown extra info type (%d) detected\n", ei->type);
        break;
      }
      put_pb_on_freelist(xi, page_buf);
    } else {
      XN_ASSERT(!page_buf->rsp.offset);
      if (!more_data_flag) { // handling the packet's 1st buffer
        if (page_buf->rsp.flags & NETRXF_csum_blank)
          pi->csum_blank = TRUE;
        if (page_buf->rsp.flags & NETRXF_data_validated)
          pi->data_validated = TRUE;
      }

      mdl = page_buf->mdl;
      mdl->ByteCount = page_buf->rsp.status; //NdisAdjustBufferLength(mdl, page_buf->rsp.status);
      //KdPrint((__DRIVER_NAME " buffer = %p, pb = %p\n", buffer, page_buf));
      if (pi->first_pb) {
        XN_ASSERT(pi->curr_pb);
        //KdPrint((__DRIVER_NAME " additional buffer\n"));
        pi->curr_pb->next = page_buf;
        pi->curr_pb = page_buf;
        XN_ASSERT(pi->curr_mdl);
        pi->curr_mdl->Next = mdl;
        pi->curr_mdl = mdl;
      } else {
        pi->first_pb = page_buf;
        pi->curr_pb = page_buf;
        pi->first_mdl = mdl;
        pi->curr_mdl = mdl;
      }
      //pi->mdl_count++;
      extra_info_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_extra_info);
      more_data_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_more_data);
      pi->total_length = pi->total_length + page_buf->rsp.status;
    }

    /* Packet done, add it to the list */
    if (!more_data_flag && !extra_info_flag) {
      pi->curr_pb = pi->first_pb;
      pi->curr_mdl = pi->first_mdl;
      XenNet_MakePackets(xi, &rc, pi);
    }

    page_buf = next_buf;
  }
  XN_ASSERT(!more_data_flag && !extra_info_flag);

#if NTDDI_VERSION < NTDDI_VISTA
  packet_count = 0;
  first_header_only_packet = NULL;
  last_header_only_packet = NULL;

  while (rc.first_packet) {
    PNDIS_PACKET packet;
    NDIS_STATUS status;

    packet = rc.first_packet;
    XN_ASSERT(PACKET_FIRST_PB(packet));
    rc.first_packet = PACKET_NEXT_PACKET(packet);
    status = NDIS_GET_PACKET_STATUS(packet);
    if (status == NDIS_STATUS_RESOURCES) {
      if (!first_header_only_packet) {
        first_header_only_packet = packet;
      } else {
        PACKET_NEXT_PACKET(last_header_only_packet) = packet;
      }
      last_header_only_packet = packet;
      PACKET_NEXT_PACKET(packet) = NULL;
    }
    packets[packet_count++] = packet;
    /* if we indicate a packet with NDIS_STATUS_RESOURCES then any following packet can't be NDIS_STATUS_SUCCESS */
    if (packet_count == MAXIMUM_PACKETS_PER_INDICATE || !rc.first_packet
        || (NDIS_GET_PACKET_STATUS(rc.first_packet) == NDIS_STATUS_SUCCESS
        && status == NDIS_STATUS_RESOURCES)) {
      NdisMIndicateReceivePacket(xi->adapter_handle, packets, packet_count);
      packet_count = 0;
    }
  }
  /* now return the packets for which we indicated NDIS_STATUS_RESOURCES */
  while (first_header_only_packet) {
    PNDIS_PACKET packet = first_header_only_packet;
    first_header_only_packet = PACKET_NEXT_PACKET(packet);
    XenNet_ReturnPacket(xi, packet);
  }
#else
  if (rc.first_nbl) {
    NdisMIndicateReceiveNetBufferLists(xi->adapter_handle, rc.first_nbl,
      NDIS_DEFAULT_PORT_NUMBER, rc.nbl_count,
      NDIS_RECEIVE_FLAGS_DISPATCH_LEVEL
      //| NDIS_RECEIVE_FLAGS_SINGLE_ETHER_TYPE
      | NDIS_RECEIVE_FLAGS_PERFECT_FILTERED);
  }
#endif
  //FUNCTION_EXIT();
  return dont_set_event;
}

static VOID
XenNet_BufferFree(xennet_info_t *xi)
{
  shared_buffer_t *sb;
  int i;

  for (i = 0; i < NET_RX_RING_SIZE; i++) {
    if (xi->rx_ring_pbs[i] != NULL) {
      put_pb_on_freelist(xi, xi->rx_ring_pbs[i]);
      xi->rx_ring_pbs[i] = NULL;
    }
  }

  /* because we are shutting down this won't allocate new ones */
  while ((sb = get_pb_from_freelist(xi)) != NULL) {
    XnEndAccess(xi->handle,
      sb->gref, FALSE, (ULONG)'XNRX');
    IoFreeMdl(sb->mdl);
    ExFreePoolWithTag(sb->virtual, XENNET_POOL_TAG);
    ExFreePoolWithTag(sb, XENNET_POOL_TAG);
  }
  while ((sb = get_hb_from_freelist(xi)) != NULL) {
    IoFreeMdl(sb->mdl);
    ExFreePoolWithTag(sb, XENNET_POOL_TAG);
  }
}

BOOLEAN
XenNet_RxInit(xennet_info_t *xi) {
#if NTDDI_VERSION < NTDDI_VISTA
  NDIS_STATUS status;
#else
  NET_BUFFER_LIST_POOL_PARAMETERS nbl_pool_parameters;
  NET_BUFFER_POOL_PARAMETERS nb_pool_parameters;
#endif
  int ret;
  int i;

  FUNCTION_ENTER();

  // this stuff needs to be done once only...
  KeInitializeSpinLock(&xi->rx_lock);
  KeInitializeEvent(&xi->rx_idle_event, SynchronizationEvent, FALSE);
  xi->rxpi = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(packet_info_t) * NdisSystemProcessorCount(), XENNET_POOL_TAG, NormalPoolPriority);
  if (!xi->rxpi) {
    FUNCTION_MSG("ExAllocatePoolWithTagPriority failed\n");
    return FALSE;
  }
  NdisZeroMemory(xi->rxpi, sizeof(packet_info_t) * NdisSystemProcessorCount());

  ret = stack_new(&xi->rx_pb_stack, NET_RX_RING_SIZE * 4);
  if (!ret) {
    FUNCTION_MSG("Failed to allocate rx_pb_stack\n");
    ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);
    return FALSE;
  }
  ret = stack_new(&xi->rx_hb_stack, NET_RX_RING_SIZE * 4);
  if (!ret) {
    FUNCTION_MSG("Failed to allocate rx_hb_stack\n");
    stack_delete(xi->rx_pb_stack, NULL, NULL);
    ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);
    return FALSE;
  }

  xi->rx_id_free = NET_RX_RING_SIZE;
  xi->rx_outstanding = 0;

  for (i = 0; i < NET_RX_RING_SIZE; i++) {
    xi->rx_ring_pbs[i] = NULL;
  }

#if NTDDI_VERSION < NTDDI_VISTA
  NdisAllocatePacketPool(&status, &xi->rx_packet_pool, NET_RX_RING_SIZE * 4, PROTOCOL_RESERVED_SIZE_IN_PACKET);
  if (status != NDIS_STATUS_SUCCESS) {
    FUNCTION_MSG("NdisAllocatePacketPool failed with 0x%x\n", status);
    return FALSE;
  }
#else
  nbl_pool_parameters.Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
  nbl_pool_parameters.Header.Revision = NET_BUFFER_LIST_POOL_PARAMETERS_REVISION_1;
  nbl_pool_parameters.Header.Size = NDIS_SIZEOF_NET_BUFFER_LIST_POOL_PARAMETERS_REVISION_1;
  nbl_pool_parameters.ProtocolId = NDIS_PROTOCOL_ID_DEFAULT;
  nbl_pool_parameters.fAllocateNetBuffer = FALSE;
  nbl_pool_parameters.ContextSize = 0;
  nbl_pool_parameters.PoolTag = XENNET_POOL_TAG;
  nbl_pool_parameters.DataSize = 0; /* NET_BUFFERS are always allocated separately */

  xi->rx_nbl_pool = NdisAllocateNetBufferListPool(xi->adapter_handle, &nbl_pool_parameters);
  if (!xi->rx_nbl_pool) {
    FUNCTION_MSG("NdisAllocateNetBufferListPool failed\n");
    return FALSE;
  }

  nb_pool_parameters.Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
  nb_pool_parameters.Header.Revision = NET_BUFFER_POOL_PARAMETERS_REVISION_1;
  nb_pool_parameters.Header.Size = NDIS_SIZEOF_NET_BUFFER_POOL_PARAMETERS_REVISION_1;
  nb_pool_parameters.PoolTag = XENNET_POOL_TAG;
  nb_pool_parameters.DataSize = 0; /* the buffers come from the ring */
  xi->rx_packet_pool = NdisAllocateNetBufferPool(xi->adapter_handle, &nb_pool_parameters);
  if (!xi->rx_packet_pool) {
    FUNCTION_MSG("NdisAllocateNetBufferPool (rx_packet_pool) failed\n");
    return FALSE;
  }
#endif
  XenNet_FillRing(xi);

  FUNCTION_EXIT();

  return TRUE;
}

VOID
XenNet_RxShutdown(xennet_info_t *xi) {
  KIRQL old_irql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->rx_lock, &old_irql);
  while (xi->rx_outstanding) {
    FUNCTION_MSG("Waiting for %d packets to be returned\n", xi->rx_outstanding);
    KeReleaseSpinLock(&xi->rx_lock, old_irql);
    KeWaitForSingleObject(&xi->rx_idle_event, Executive, KernelMode, FALSE, NULL);
    KeAcquireSpinLock(&xi->rx_lock, &old_irql);
  }
  KeReleaseSpinLock(&xi->rx_lock, old_irql);

  XenNet_BufferFree(xi);

  stack_delete(xi->rx_pb_stack, NULL, NULL);
  stack_delete(xi->rx_hb_stack, NULL, NULL);

  ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);

#if NTDDI_VERSION < NTDDI_VISTA
  NdisFreePacketPool(xi->rx_packet_pool);
#else
  NdisFreeNetBufferPool(xi->rx_packet_pool);
  NdisFreeNetBufferListPool(xi->rx_nbl_pool);
#endif

  FUNCTION_EXIT();
  return;
}