win-pvdrivers

view xennet/xennet_rx.c @ 1023:1ce315b193d1

Change all NT_ASSERT to XN_ASSERT
author James Harper <james.harper@bendigoit.com.au>
date Tue Feb 19 15:12:35 2013 +1100 (2013-02-19)
parents 9b6213b6be25
children cb767700f91c
line source
1 /*
2 PV Net Driver for Windows Xen HVM Domains
3 Copyright (C) 2007 James Harper
4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
21 #include "xennet.h"
23 static __inline shared_buffer_t *
24 get_pb_from_freelist(struct xennet_info *xi)
25 {
26 shared_buffer_t *pb;
27 PVOID ptr_ref;
29 if (stack_pop(xi->rx_pb_stack, &ptr_ref))
30 {
31 pb = ptr_ref;
32 pb->ref_count = 1;
33 InterlockedDecrement(&xi->rx_pb_free);
34 return pb;
35 }
37 /* don't allocate a new one if we are shutting down */
38 if (xi->device_state != DEVICE_STATE_INITIALISING && xi->device_state != DEVICE_STATE_ACTIVE)
39 return NULL;
41 pb = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(shared_buffer_t), XENNET_POOL_TAG, LowPoolPriority);
42 if (!pb)
43 return NULL;
44 pb->virtual = ExAllocatePoolWithTagPriority(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG, LowPoolPriority);
45 if (!pb->virtual)
46 {
47 ExFreePoolWithTag(pb, XENNET_POOL_TAG);
48 return NULL;
49 }
50 pb->mdl = IoAllocateMdl(pb->virtual, PAGE_SIZE, FALSE, FALSE, NULL);
51 if (!pb->mdl)
52 {
53 ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
54 ExFreePoolWithTag(pb, XENNET_POOL_TAG);
55 return NULL;
56 }
57 pb->gref = (grant_ref_t)XnGrantAccess(xi->handle,
58 (ULONG)(MmGetPhysicalAddress(pb->virtual).QuadPart >> PAGE_SHIFT), FALSE, INVALID_GRANT_REF, (ULONG)'XNRX');
59 if (pb->gref == INVALID_GRANT_REF)
60 {
61 IoFreeMdl(pb->mdl);
62 ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
63 ExFreePoolWithTag(pb, XENNET_POOL_TAG);
64 return NULL;
65 }
66 MmBuildMdlForNonPagedPool(pb->mdl);
67 pb->ref_count = 1;
68 return pb;
69 }
/* Take an additional reference on a page buffer (e.g. when chaining it into
 * an outgoing NDIS packet). Balanced by put_pb_on_freelist(). xi is unused
 * but kept for signature symmetry with the other pb helpers. */
static __inline VOID
ref_pb(struct xennet_info *xi, shared_buffer_t *pb)
{
  UNREFERENCED_PARAMETER(xi);
  InterlockedIncrement(&pb->ref_count);
}
/* Drop one reference on a page buffer. When the last reference goes away,
 * either destroy the buffer outright (if the freelist is already above
 * RX_MAX_PB_FREELIST) or reset it and push it onto the freelist stack for
 * reuse by get_pb_from_freelist(). */
static __inline VOID
put_pb_on_freelist(struct xennet_info *xi, shared_buffer_t *pb)
{
  if (InterlockedDecrement(&pb->ref_count) == 0)
  {
    //NdisAdjustBufferLength(pb->buffer, PAGE_SIZE);
    //NDIS_BUFFER_LINKAGE(pb->buffer) = NULL;
    /* NOTE(review): rx_pb_free is read without synchronisation here, so the
     * cap is approximate under concurrency — appears intentional (the exact
     * freelist size doesn't matter). */
    if (xi->rx_pb_free > RX_MAX_PB_FREELIST)
    {
      /* over the cap: revoke the grant and free everything */
      XnEndAccess(xi->handle, pb->gref, FALSE, (ULONG)'XNRX');
      IoFreeMdl(pb->mdl);
      ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
      ExFreePoolWithTag(pb, XENNET_POOL_TAG);
      return;
    }
    /* reset the MDL to describe the whole page again before recycling */
    pb->mdl->ByteCount = PAGE_SIZE;
    pb->mdl->Next = NULL;
    pb->next = NULL;
    stack_push(xi->rx_pb_stack, pb);
    InterlockedIncrement(&xi->rx_pb_free);
  }
}
101 static __inline shared_buffer_t *
102 get_hb_from_freelist(struct xennet_info *xi)
103 {
104 shared_buffer_t *hb;
105 PVOID ptr_ref;
107 if (stack_pop(xi->rx_hb_stack, &ptr_ref))
108 {
109 hb = ptr_ref;
110 InterlockedDecrement(&xi->rx_hb_free);
111 return hb;
112 }
114 /* don't allocate a new one if we are shutting down */
115 if (xi->device_state != DEVICE_STATE_INITIALISING && xi->device_state != DEVICE_STATE_ACTIVE)
116 return NULL;
118 hb = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(shared_buffer_t) + MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH, XENNET_POOL_TAG, LowPoolPriority);
119 if (!hb)
120 return NULL;
121 NdisZeroMemory(hb, sizeof(shared_buffer_t));
122 hb->mdl = IoAllocateMdl(hb + 1, MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH, FALSE, FALSE, NULL);
123 if (!hb->mdl) {
124 ExFreePoolWithTag(hb, XENNET_POOL_TAG);
125 return NULL;
126 }
127 MmBuildMdlForNonPagedPool(hb->mdl);
128 return hb;
129 }
131 static __inline VOID
132 put_hb_on_freelist(struct xennet_info *xi, shared_buffer_t *hb)
133 {
134 XN_ASSERT(xi);
135 hb->mdl->ByteCount = sizeof(shared_buffer_t) + MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH;
136 hb->mdl->Next = NULL;
137 hb->next = NULL;
138 stack_push(xi->rx_hb_stack, hb);
139 InterlockedIncrement(&xi->rx_hb_free);
140 }
// Called at DISPATCH_LEVEL with rx lock held
/* Replenish the shared rx ring with empty granted page buffers so netback
 * has somewhere to put incoming frames. Refills up to rx_target entries,
 * but only bothers if the ring has dropped below 3/4 full. Notifies the
 * backend via event channel if the ring macro says it is needed. */
static VOID
XenNet_FillRing(struct xennet_info *xi)
{
  unsigned short id;
  shared_buffer_t *page_buf;
  ULONG i, notify;
  ULONG batch_target;
  RING_IDX req_prod = xi->rx_ring.req_prod_pvt;
  netif_rx_request_t *req;

  //FUNCTION_ENTER();

  if (xi->device_state != DEVICE_STATE_ACTIVE)
    return;

  /* number of free request slots between what we've produced and what the
   * backend has consumed */
  batch_target = xi->rx_target - (req_prod - xi->rx_ring.rsp_cons);

  if (batch_target < (xi->rx_target >> 2)) {
    //FUNCTION_EXIT();
    return; /* only refill if we are less than 3/4 full already */
  }

  for (i = 0; i < batch_target; i++) {
    page_buf = get_pb_from_freelist(xi);
    if (!page_buf) {
      KdPrint((__DRIVER_NAME " Added %d out of %d buffers to rx ring (no free pages)\n", i, batch_target));
      break;
    }
    xi->rx_id_free--;

    /* Give to netback */
    id = (USHORT)((req_prod + i) & (NET_RX_RING_SIZE - 1));
    XN_ASSERT(xi->rx_ring_pbs[id] == NULL);
    xi->rx_ring_pbs[id] = page_buf; /* remember which pb backs each ring id */
    req = RING_GET_REQUEST(&xi->rx_ring, req_prod + i);
    req->id = id;
    req->gref = page_buf->gref;
    XN_ASSERT(req->gref != INVALID_GRANT_REF);
  }
  /* make sure the request contents are visible before publishing req_prod */
  KeMemoryBarrier();
  xi->rx_ring.req_prod_pvt = req_prod + i;
  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->rx_ring, notify);
  if (notify) {
    XnNotify(xi->handle, xi->event_channel);
  }

  //FUNCTION_EXIT();

  return;
}
#if NTDDI_VERSION < NTDDI_VISTA
/* Per-pass receive accumulator (NDIS5): singly-linked chain of NDIS_PACKETs
 * built by XenNet_MakePacket and indicated in batches by the caller. */
typedef struct {
  PNDIS_PACKET first_packet;
  PNDIS_PACKET last_packet;
  ULONG packet_count;
} rx_context_t;
#else
/* Per-pass receive accumulator (NDIS6+): chain of NET_BUFFER_LISTs, with
 * separate counts of packets (NET_BUFFERs) and NBLs. */
typedef struct {
  PNET_BUFFER_LIST first_nbl;
  PNET_BUFFER_LIST last_nbl;
  ULONG packet_count;
  ULONG nbl_count;
} rx_context_t;
#endif
#if NTDDI_VERSION < NTDDI_VISTA
/*
NDIS5 appears to insist that the checksum on received packets is correct, and won't
believe us when we lie about it, which happens when the packet is generated on the
same bridge in Dom0. Doh!
This is only for TCP and UDP packets. IP checksums appear to be correct anyways.
*/

/* Walk the packet's buffer chain computing the TCP/UDP checksum (including
 * the IPv4 pseudo-header). If set_csum, write the computed checksum into the
 * packet; otherwise return whether the existing checksum matches.
 * Handles 16-bit words that straddle buffer boundaries via csum_span.
 * Returns FALSE on malformed input (no buffer, length mismatch, unknown
 * protocol, or running out of buffers mid-walk). */
static BOOLEAN
XenNet_SumPacketData(
  packet_info_t *pi,
  PNDIS_PACKET packet,
  BOOLEAN set_csum) {
  USHORT i;
  PUCHAR buffer;
  PMDL mdl;
  UINT total_length;
  UINT data_length;
  UINT buffer_length;
  USHORT buffer_offset;
  ULONG csum;
  PUSHORT csum_ptr;
  USHORT remaining;
  USHORT ip4_length;
  BOOLEAN csum_span = TRUE; /* when the USHORT to be checksummed spans a buffer */

  //FUNCTION_ENTER();

  NdisGetFirstBufferFromPacketSafe(packet, &mdl, &buffer, &buffer_length, &total_length, NormalPagePriority);
  if (!buffer) {
    FUNCTION_MSG("NdisGetFirstBufferFromPacketSafe failed, buffer == NULL\n");
    return FALSE;
  }
  XN_ASSERT(mdl);

  /* IPv4 total length lives at offset 2 of the IP header */
  ip4_length = GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 2]);
  data_length = ip4_length + XN_HDR_SIZE;

  if ((USHORT)data_length > total_length) {
    FUNCTION_MSG("Size Mismatch %d (ip4_length + XN_HDR_SIZE) != %d (total_length)\n", ip4_length + XN_HDR_SIZE, total_length);
    return FALSE;
  }

  /* locate the L4 checksum field: offset 16 in TCP, 6 in UDP.
   * NOTE(review): assumes the whole L4 header is in the first buffer. */
  switch (pi->ip_proto) {
  case 6:
    XN_ASSERT(buffer_length >= (USHORT)(XN_HDR_SIZE + pi->ip4_header_length + 17));
    csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + pi->ip4_header_length + 16];
    break;
  case 17:
    XN_ASSERT(buffer_length >= (USHORT)(XN_HDR_SIZE + pi->ip4_header_length + 7));
    csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + pi->ip4_header_length + 6];
    break;
  default:
    KdPrint((__DRIVER_NAME " Don't know how to calc sum for IP Proto %d\n", pi->ip_proto));
    //FUNCTION_EXIT();
    return FALSE; // should never happen
  }

  if (set_csum)
    *csum_ptr = 0;

  /* IPv4 pseudo-header: src addr, dst addr, protocol, L4 length */
  csum = 0;
  csum += GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 12]) + GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 14]); // src
  csum += GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 16]) + GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 18]); // dst
  csum += ((USHORT)buffer[XN_HDR_SIZE + 9]);

  remaining = ip4_length - pi->ip4_header_length;

  csum += remaining;

  /* sum the L4 payload 16 bits at a time, following the MDL chain */
  csum_span = FALSE;
  buffer_offset = i = XN_HDR_SIZE + pi->ip4_header_length;
  while (i < data_length) {
    /* don't include the checksum field itself in the calculation */
    if ((pi->ip_proto == 6 && i == XN_HDR_SIZE + pi->ip4_header_length + 16) || (pi->ip_proto == 17 && i == XN_HDR_SIZE + pi->ip4_header_length + 6)) {
      /* we know that this always happens in the header buffer so we are guaranteed the full two bytes */
      i += 2;
      buffer_offset += 2;
      continue;
    }
    if (csum_span) {
      /* the other half of the next bit */
      XN_ASSERT(buffer_offset == 0);
      csum += (USHORT)buffer[buffer_offset];
      csum_span = FALSE;
      i += 1;
      buffer_offset += 1;
    } else if (buffer_offset == buffer_length - 1) {
      /* deal with a buffer ending on an odd byte boundary */
      csum += (USHORT)buffer[buffer_offset] << 8;
      csum_span = TRUE;
      i += 1;
      buffer_offset += 1;
    } else {
      csum += GET_NET_PUSHORT(&buffer[buffer_offset]);
      i += 2;
      buffer_offset += 2;
    }
    if (buffer_offset == buffer_length && i < total_length) {
      /* advance to the next buffer in the chain */
      NdisGetNextBuffer(mdl, &mdl);
      if (mdl == NULL) {
        KdPrint((__DRIVER_NAME " Ran out of buffers\n"));
        return FALSE; // should never happen
      }
      NdisQueryBufferSafe(mdl, &buffer, &buffer_length, NormalPagePriority);
      XN_ASSERT(buffer_length);
      buffer_offset = 0;
    }
  }

  /* fold carries back into 16 bits (ones-complement sum) */
  while (csum & 0xFFFF0000)
    csum = (csum & 0xFFFF) + (csum >> 16);

  if (set_csum) {
    *csum_ptr = (USHORT)~GET_NET_USHORT((USHORT)csum);
  } else {
    return (BOOLEAN)(*csum_ptr == (USHORT)~GET_NET_USHORT((USHORT)csum));
  }
  return TRUE;
}
#endif
/* Build one NDIS packet (NDIS5: NDIS_PACKET; NDIS6: NET_BUFFER_LIST +
 * NET_BUFFER) from the parsed receive data in pi and append it to the
 * accumulator rc. Two layouts:
 *   - single buffer <= MTU, no LSO split: chain the page buffer directly;
 *   - otherwise: copy the protocol headers into a header buffer, then chain
 *     partial MDLs over the payload page buffers, taking a pb reference for
 *     each. When pi->split_required, this emits one MSS-sized segment per
 *     call, patching the IPv4 total length and TCP sequence number.
 * Also fills in rx checksum-offload info. Returns FALSE on allocation
 * failure (caller drops the packet). */
static BOOLEAN
XenNet_MakePacket(struct xennet_info *xi, rx_context_t *rc, packet_info_t *pi) {
#if NTDDI_VERSION < NTDDI_VISTA
  NDIS_STATUS status;
  PNDIS_PACKET packet;
#else
  PNET_BUFFER_LIST nbl;
  PNET_BUFFER packet;
#endif
  PMDL mdl_head, mdl_tail, curr_mdl;
  PUCHAR header_va;
  ULONG out_remaining;
  ULONG header_extra;
  shared_buffer_t *header_buf;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  UINT packet_length;
#else
  NDIS_TCP_IP_CHECKSUM_NET_BUFFER_LIST_INFO csum_info;
#endif
  //FUNCTION_ENTER();

  /* allocate the packet container(s) first */
#if NTDDI_VERSION < NTDDI_VISTA
  NdisAllocatePacket(&status, &packet, xi->rx_packet_pool);
  if (status != NDIS_STATUS_SUCCESS) {
    FUNCTION_MSG("No free packets\n");
    return FALSE;
  }
  NdisZeroMemory(packet->MiniportReservedEx, sizeof(packet->MiniportReservedEx));
  NDIS_SET_PACKET_HEADER_SIZE(packet, XN_HDR_SIZE);
#else
  nbl = NdisAllocateNetBufferList(xi->rx_nbl_pool, 0, 0);
  if (!nbl) {
    /* buffers will be freed in MakePackets */
    KdPrint((__DRIVER_NAME " No free nbls\n"));
    //FUNCTION_EXIT();
    return FALSE;
  }

  packet = NdisAllocateNetBuffer(xi->rx_packet_pool, NULL, 0, 0);
  if (!packet) {
    KdPrint((__DRIVER_NAME " No free packets\n"));
    NdisFreeNetBufferList(nbl);
    //FUNCTION_EXIT();
    return FALSE;
  }
#endif

  if (!pi->first_mdl->Next && !pi->split_required) {
    /* a single buffer <= MTU */
    header_buf = NULL;
    XenNet_BuildHeader(pi, pi->first_mdl_virtual, pi->first_mdl_length);
#if NTDDI_VERSION < NTDDI_VISTA
    NdisChainBufferAtBack(packet, pi->first_mdl);
    PACKET_FIRST_PB(packet) = pi->first_pb;
#else
    NET_BUFFER_FIRST_MDL(packet) = pi->first_mdl;
    NET_BUFFER_CURRENT_MDL(packet) = pi->first_mdl;
    NET_BUFFER_CURRENT_MDL_OFFSET(packet) = 0;
    NET_BUFFER_DATA_OFFSET(packet) = 0;
    NET_BUFFER_DATA_LENGTH(packet) = pi->total_length;
    NB_FIRST_PB(packet) = pi->first_pb;
#endif
    ref_pb(xi, pi->first_pb); /* the packet now holds a reference on the pb */
  } else {
    XN_ASSERT(ndis_os_minor_version >= 1);
    header_buf = get_hb_from_freelist(xi);
    if (!header_buf) {
      FUNCTION_MSG("No free header buffers\n");
#if NTDDI_VERSION < NTDDI_VISTA
      NdisUnchainBufferAtFront(packet, &curr_mdl);
      NdisFreePacket(packet);
#else
      NdisFreeNetBufferList(nbl);
      NdisFreeNetBuffer(packet);
#endif
      return FALSE;
    }
    /* the header data area lives immediately after the shared_buffer_t */
    header_va = (PUCHAR)(header_buf + 1);
    NdisMoveMemory(header_va, pi->header, pi->header_length);
    //KdPrint((__DRIVER_NAME " header_length = %d, current_lookahead = %d\n", pi->header_length, xi->current_lookahead));
    //KdPrint((__DRIVER_NAME " ip4_header_length = %d\n", pi->ip4_header_length));
    //KdPrint((__DRIVER_NAME " tcp_header_length = %d\n", pi->tcp_header_length));
    /* make sure only the header is in the first buffer (or the entire packet, but that is done in the above case) */
    XenNet_BuildHeader(pi, header_va, MAX_ETH_HEADER_LENGTH + pi->ip4_header_length + pi->tcp_header_length);
    /* header_extra = payload bytes that were pulled into the lookahead
     * header copy and must not be double-counted from the page buffers */
    header_extra = pi->header_length - (MAX_ETH_HEADER_LENGTH + pi->ip4_header_length + pi->tcp_header_length);
    XN_ASSERT(pi->header_length <= MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH);
    header_buf->mdl->ByteCount = pi->header_length;
    mdl_head = mdl_tail = curr_mdl = header_buf->mdl;
#if NTDDI_VERSION < NTDDI_VISTA
    PACKET_FIRST_PB(packet) = header_buf;
    header_buf->next = pi->curr_pb;
    NdisChainBufferAtBack(packet, mdl_head);
#else
    NB_FIRST_PB(packet) = header_buf;
    header_buf->next = pi->curr_pb;
    NET_BUFFER_FIRST_MDL(packet) = mdl_head;
    NET_BUFFER_CURRENT_MDL(packet) = mdl_head;
    NET_BUFFER_CURRENT_MDL_OFFSET(packet) = 0;
    NET_BUFFER_DATA_OFFSET(packet) = 0;
    NET_BUFFER_DATA_LENGTH(packet) = pi->header_length;
#endif

    if (pi->split_required) {
      /* LSO split: this segment carries min(mss, tcp_remaining) bytes of
       * TCP payload; patch IP total length and TCP sequence accordingly */
      ULONG tcp_length;
      USHORT new_ip4_length;
      tcp_length = (USHORT)min(pi->mss, pi->tcp_remaining);
      new_ip4_length = (USHORT)(pi->ip4_header_length + pi->tcp_header_length + tcp_length);
      SET_NET_USHORT(&header_va[XN_HDR_SIZE + 2], new_ip4_length);
      SET_NET_ULONG(&header_va[XN_HDR_SIZE + pi->ip4_header_length + 4], pi->tcp_seq);
      pi->tcp_seq += tcp_length;
      pi->tcp_remaining = (USHORT)(pi->tcp_remaining - tcp_length);
      /* part of the packet is already present in the header buffer for lookahead */
      out_remaining = tcp_length - header_extra;
      XN_ASSERT((LONG)out_remaining >= 0);
    } else {
      out_remaining = pi->total_length - pi->header_length;
      XN_ASSERT((LONG)out_remaining >= 0);
    }

    /* chain partial MDLs over the payload page buffers until out_remaining
     * is consumed, taking a pb reference per chained buffer */
    while (out_remaining != 0) {
      //ULONG in_buffer_offset;
      ULONG in_buffer_length;
      ULONG out_length;

      //KdPrint((__DRIVER_NAME " in loop - out_remaining = %d, curr_buffer = %p, curr_pb = %p\n", out_remaining, pi->curr_mdl, pi->curr_pb));
      if (!pi->curr_mdl || !pi->curr_pb) {
        KdPrint((__DRIVER_NAME " out of buffers for packet\n"));
        //KdPrint((__DRIVER_NAME " out_remaining = %d, curr_buffer = %p, curr_pb = %p\n", out_remaining, pi->curr_mdl, pi->curr_pb));
        // TODO: free some stuff or we'll leak
        /* unchain buffers then free packet */
        //FUNCTION_EXIT();
        /* NOTE(review): acknowledged leak path — the packet/nbl and any
         * already-chained partial MDLs/pb refs are not released here */
        return FALSE;
      }

      in_buffer_length = MmGetMdlByteCount(pi->curr_mdl);
      out_length = min(out_remaining, in_buffer_length - pi->curr_mdl_offset);
      curr_mdl = IoAllocateMdl((PUCHAR)MmGetMdlVirtualAddress(pi->curr_mdl) + pi->curr_mdl_offset, out_length, FALSE, FALSE, NULL);
      XN_ASSERT(curr_mdl);
      IoBuildPartialMdl(pi->curr_mdl, curr_mdl, (PUCHAR)MmGetMdlVirtualAddress(pi->curr_mdl) + pi->curr_mdl_offset, out_length);
      mdl_tail->Next = curr_mdl;
      mdl_tail = curr_mdl;
      curr_mdl->Next = NULL; /* I think this might be redundant */
#if NTDDI_VERSION < NTDDI_VISTA
#else
      NET_BUFFER_DATA_LENGTH(packet) += out_length;
#endif
      ref_pb(xi, pi->curr_pb);
      pi->curr_mdl_offset = (USHORT)(pi->curr_mdl_offset + out_length);
      if (pi->curr_mdl_offset == in_buffer_length) {
        /* exhausted this page buffer, move to the next in the chain */
        pi->curr_mdl = pi->curr_mdl->Next;
        pi->curr_pb = pi->curr_pb->next;
        pi->curr_mdl_offset = 0;
      }
      out_remaining -= out_length;
    }
    if (pi->split_required) {
      // TODO: only if Ip checksum is disabled...
      //XenNet_SumIpHeader(header_va, pi->ip4_header_length);
    }
    if (header_extra > 0)
      pi->header_length -= header_extra;
  }

  /* NOTE(review): packet_count is incremented again in the NDIS5-only
   * block further down, so NDIS5 builds double-count — confirm intended */
  rc->packet_count++;
#if NTDDI_VERSION < NTDDI_VISTA
#else
  NET_BUFFER_LIST_FIRST_NB(nbl) = packet;
#endif

  if (pi->parse_result == PARSE_OK) {
#if NTDDI_VERSION < NTDDI_VISTA
    /* NDIS5: fill per-packet checksum info; optionally verify/fix sums */
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    XN_ASSERT(csum_info->Value == 0);
    if (pi->csum_blank || pi->data_validated) {
      BOOLEAN checksum_offload = FALSE;
      /* we know this is IPv4, and we know Linux always validates the IPv4 checksum for us */
      if (xi->setting_csum.V4Receive.IpChecksum) {
        if (!pi->ip_has_options || xi->setting_csum.V4Receive.IpOptionsSupported) {
          if (XenNet_CheckIpHeaderSum(pi->header, pi->ip4_header_length))
            csum_info->Receive.NdisPacketIpChecksumSucceeded = TRUE;
          else
            csum_info->Receive.NdisPacketIpChecksumFailed = TRUE;
        }
      }
      if (xi->setting_csum.V4Receive.TcpChecksum && pi->ip_proto == 6) {
        if (!pi->tcp_has_options || xi->setting_csum.V4Receive.TcpOptionsSupported) {
          csum_info->Receive.NdisPacketTcpChecksumSucceeded = TRUE;
          checksum_offload = TRUE;
        }
      } else if (xi->setting_csum.V4Receive.UdpChecksum && pi->ip_proto == 17) {
        csum_info->Receive.NdisPacketUdpChecksumSucceeded = TRUE;
        checksum_offload = TRUE;
      }
      /* if the backend left the checksum blank and we can't claim offload,
       * compute and write the real checksum ourselves */
      if (pi->csum_blank && (!xi->config_csum_rx_dont_fix || !checksum_offload)) {
        XenNet_SumPacketData(pi, packet, TRUE);
      }
    } else if (xi->config_csum_rx_check && pi->ip_version == 4) {
      /* optionally verify checksums on packets the backend didn't vouch for */
      if (xi->setting_csum.V4Receive.IpChecksum) {
        if (!pi->ip_has_options || xi->setting_csum.V4Receive.IpOptionsSupported) {
          if (XenNet_CheckIpHeaderSum(pi->header, pi->ip4_header_length))
            csum_info->Receive.NdisPacketIpChecksumSucceeded = TRUE;
          else
            csum_info->Receive.NdisPacketIpChecksumFailed = TRUE;
        }
      }
      if (xi->setting_csum.V4Receive.TcpChecksum && pi->ip_proto == 6) {
        if (!pi->tcp_has_options || xi->setting_csum.V4Receive.TcpOptionsSupported) {
          if (XenNet_SumPacketData(pi, packet, FALSE)) {
            csum_info->Receive.NdisPacketTcpChecksumSucceeded = TRUE;
          } else {
            csum_info->Receive.NdisPacketTcpChecksumFailed = TRUE;
          }
        }
      } else if (xi->setting_csum.V4Receive.UdpChecksum && pi->ip_proto == 17) {
        if (XenNet_SumPacketData(pi, packet, FALSE)) {
          csum_info->Receive.NdisPacketUdpChecksumSucceeded = TRUE;
        } else {
          csum_info->Receive.NdisPacketUdpChecksumFailed = TRUE;
        }
      }
    }
#else
    /* NDIS6: set NBL checksum info; trust the backend's validation flags */
    csum_info.Value = 0;
    if (pi->csum_blank || pi->data_validated || pi->mss) {
      if (pi->ip_proto == 6) {
        csum_info.Receive.IpChecksumSucceeded = TRUE;
        csum_info.Receive.TcpChecksumSucceeded = TRUE;
      } else if (pi->ip_proto == 17) {
        csum_info.Receive.IpChecksumSucceeded = TRUE;
        csum_info.Receive.UdpChecksumSucceeded = TRUE;
      }
    }
    NET_BUFFER_LIST_INFO(nbl, TcpIpChecksumNetBufferListInfo) = csum_info.Value;
#endif
  }

  /* link the finished packet/nbl onto the accumulator chain */
#if NTDDI_VERSION < NTDDI_VISTA
  if (!rc->first_packet) {
    rc->first_packet = packet;
  } else {
    PACKET_NEXT_PACKET(rc->last_packet) = packet;
  }
  rc->last_packet = packet;
  rc->packet_count++;
#else
  if (!rc->first_nbl) {
    rc->first_nbl = nbl;
  } else {
    NET_BUFFER_LIST_NEXT_NBL(rc->last_nbl) = nbl;
  }
  rc->last_nbl = nbl;
  NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL;
  rc->nbl_count++;
  /* update receive statistics by destination class */
  if (pi->is_multicast) {
    /* multicast */
    xi->stats.ifHCInMulticastPkts++;
    xi->stats.ifHCInMulticastOctets += NET_BUFFER_DATA_LENGTH(packet);
  } else if (pi->is_broadcast) {
    /* broadcast */
    xi->stats.ifHCInBroadcastPkts++;
    xi->stats.ifHCInBroadcastOctets += NET_BUFFER_DATA_LENGTH(packet);
  } else {
    /* unicast */
    xi->stats.ifHCInUcastPkts++;
    xi->stats.ifHCInUcastOctets += NET_BUFFER_DATA_LENGTH(packet);
  }
#endif

#if NTDDI_VERSION < NTDDI_VISTA
  /* windows gets lazy about ack packets and holds on to them forever under high load situations. we don't like this */
  NdisQueryPacketLength(packet, &packet_length);
  if (pi->ip_proto == 6 && packet_length <= NDIS_STATUS_RESOURCES_MAX_LENGTH)
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_RESOURCES);
  else
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
#endif
  //FUNCTION_EXIT();

  InterlockedIncrement(&xi->rx_outstanding);
  //FUNCTION_EXIT();
  return TRUE;
}
/* Turn one received frame (pi's chain of page buffers) into one or more
 * NDIS packets via XenNet_MakePacket. Non-LSO frames become a single
 * packet; LSO frames (pi->split_required) are re-segmented into MSS-sized
 * TCP packets, with the PSH flag applied only to the final segment.
 * Always releases this function's references on the page buffers and
 * clears pi before returning (packets that were built hold their own
 * pb references). */
static VOID
XenNet_MakePackets(struct xennet_info *xi, rx_context_t *rc, packet_info_t *pi)
{
  UCHAR psh;
  //PNDIS_BUFFER buffer;
  shared_buffer_t *page_buf;

  //FUNCTION_ENTER();

  XenNet_ParsePacketHeader(pi, NULL, 0);
  //pi->split_required = FALSE;

  if (!XenNet_FilterAcceptPacket(xi, pi)) {
    goto done;
  }

  if (pi->split_required) {
#if NTDDI_VERSION < NTDDI_VISTA
    /* need to split to mss for NDIS5 */
#else
    /* NDIS6 can indicate larger segments: optionally relax the split size */
    switch (xi->current_gso_rx_split_type) {
    case RX_LSO_SPLIT_HALF:
      pi->mss = max((pi->tcp_length + 1) / 2, pi->mss);
      break;
    case RX_LSO_SPLIT_NONE:
      pi->mss = 65535;
      break;
    }
#endif
  }

  switch (pi->ip_proto) {
  case 6:  // TCP
    if (pi->split_required)
      break; /* LSO TCP falls through to the split loop below */
    /* fall through */
  case 17:  // UDP
    if (!XenNet_MakePacket(xi, rc, pi)) {
      FUNCTION_MSG("Failed to make packet\n");
#if NTDDI_VERSION < NTDDI_VISTA
      xi->stat_rx_no_buffer++;
#else
      xi->stats.ifInDiscards++;
#endif
      goto done;
    }
    goto done;
  default:
    if (!XenNet_MakePacket(xi, rc, pi)) {
      FUNCTION_MSG("Failed to make packet\n");
#if NTDDI_VERSION < NTDDI_VISTA
      xi->stat_rx_no_buffer++;
#else
      xi->stats.ifInDiscards++;
#endif
      goto done;
    }
    goto done;
  }

  /* this is the split_required code */
  pi->tcp_remaining = pi->tcp_length;

  /* we can make certain assumptions here as the following code is only for tcp4 */
  /* remember the original PSH flag; only the last segment should carry it */
  psh = pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] & 8;
  while (pi->tcp_remaining) {
    if (!XenNet_MakePacket(xi, rc, pi)) {
      FUNCTION_MSG("Failed to make packet\n");
#if NTDDI_VERSION < NTDDI_VISTA
      xi->stat_rx_no_buffer++;
#else
      xi->stats.ifInDiscards++;
#endif
      break; /* we are out of memory - just drop the packets */
    }
    if (psh) {
      /* clear PSH on intermediate segments, restore it on the last one */
      if (pi->tcp_remaining)
        pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] &= ~8;
      else
        pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] |= 8;
    }
    //XenNet_SumPacketData(pi, packet, TRUE);
    //entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
    //InsertTailList(rx_packet_list, entry);
  }
done:
  /* drop this function's reference on each page buffer in the chain */
  page_buf = pi->first_pb;
  while (page_buf) {
    shared_buffer_t *next_pb = page_buf->next;
    put_pb_on_freelist(xi, page_buf); /* this doesn't actually free the page_puf if there are outstanding references */
    page_buf = next_pb;
  }
  XenNet_ClearPacketInfo(pi);
  //FUNCTION_EXIT();
  return;
}
#if NTDDI_VERSION < NTDDI_VISTA
/* called at DISPATCH_LEVEL */
/* it's okay for return packet to be called while resume_state != RUNNING as the packet will simply be added back to the freelist, the grants will be fixed later */
/* NDIS5 return-packet handler: walk the packet's buffer chain in lockstep
 * with its shared_buffer_t chain (PACKET_FIRST_PB), returning header
 * buffers and page buffers to their freelists, then free the packet and
 * signal the idle event when the last outstanding rx packet drains during
 * shutdown. */
VOID
XenNet_ReturnPacket(NDIS_HANDLE adapter_context, PNDIS_PACKET packet) {
  struct xennet_info *xi = adapter_context;
  PNDIS_BUFFER buffer;
  shared_buffer_t *page_buf = PACKET_FIRST_PB(packet);

  //FUNCTION_ENTER();

  NdisUnchainBufferAtFront(packet, &buffer);

  while (buffer) {
    shared_buffer_t *next_buf;
    XN_ASSERT(page_buf);
    next_buf = page_buf->next;
    if (!page_buf->virtual) {
      /* this is a hb not a pb because virtual is NULL (virtual is just the memory after the hb */
      put_hb_on_freelist(xi, (shared_buffer_t *)MmGetMdlVirtualAddress(buffer) - 1);
    } else {
      /* partial MDLs created in MakePacket differ from the pb's own MDL */
      if (buffer != page_buf->mdl)
        NdisFreeBuffer(buffer);
      put_pb_on_freelist(xi, page_buf);
    }
    NdisUnchainBufferAtFront(packet, &buffer);
    page_buf = next_buf;
  }

  NdisFreePacket(packet);
  InterlockedDecrement(&xi->rx_outstanding);
  /* during shutdown, wake the waiter once all rx packets have come back */
  if (!xi->rx_outstanding && xi->device_state != DEVICE_STATE_ACTIVE)
    KeSetEvent(&xi->rx_idle_event, IO_NO_INCREMENT, FALSE);
  //FUNCTION_EXIT();
}
#else
/* called at <= DISPATCH_LEVEL */
/* it's okay for return packet to be called while resume_state != RUNNING as the packet will simply be added back to the freelist, the grants will be fixed later */
/* NDIS6 return handler: for each NBL, for each NET_BUFFER, walk the MDL
 * chain in lockstep with the shared_buffer_t chain (NB_FIRST_PB), returning
 * header buffers and page buffers to their freelists and freeing any
 * partial MDLs created in MakePacket. Frees the NET_BUFFERs and NBLs, and
 * signals the idle event when the last outstanding rx packet drains during
 * shutdown. */
VOID
XenNet_ReturnNetBufferLists(NDIS_HANDLE adapter_context, PNET_BUFFER_LIST curr_nbl, ULONG return_flags)
{
  struct xennet_info *xi = adapter_context;
  UNREFERENCED_PARAMETER(return_flags);

  //FUNCTION_ENTER();

  //KdPrint((__DRIVER_NAME " page_buf = %p\n", page_buf));

  XN_ASSERT(xi);
  while (curr_nbl)
  {
    PNET_BUFFER_LIST next_nbl;
    PNET_BUFFER curr_nb;

    next_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl);
    curr_nb = NET_BUFFER_LIST_FIRST_NB(curr_nbl);
    while (curr_nb)
    {
      PNET_BUFFER next_nb;
      PMDL curr_mdl;
      shared_buffer_t *page_buf;

      next_nb = NET_BUFFER_NEXT_NB(curr_nb);
      curr_mdl = NET_BUFFER_FIRST_MDL(curr_nb);
      page_buf = NB_FIRST_PB(curr_nb);
      while (curr_mdl)
      {
        shared_buffer_t *next_buf;
        PMDL next_mdl;

        XN_ASSERT(page_buf); /* make sure that there is a pb to match this mdl */
        next_mdl = curr_mdl->Next;
        next_buf = page_buf->next;
        if (!page_buf->virtual)
        {
          /* this is a hb not a pb because virtual is NULL (virtual is just the memory after the hb */
          put_hb_on_freelist(xi, (shared_buffer_t *)MmGetMdlVirtualAddress(curr_mdl) - 1);
        }
        else
        {
          //KdPrint((__DRIVER_NAME " returning page_buf %p with id %d\n", page_buf, page_buf->id));
          /* partial MDLs created in MakePacket differ from the pb's own MDL */
          if (curr_mdl != page_buf->mdl)
          {
            //KdPrint((__DRIVER_NAME " curr_mdl = %p, page_buf->mdl = %p\n", curr_mdl, page_buf->mdl));
            IoFreeMdl(curr_mdl);
          }
          put_pb_on_freelist(xi, page_buf);
        }
        curr_mdl = next_mdl;
        page_buf = next_buf;
      }

      NdisFreeNetBuffer(curr_nb);
      InterlockedDecrement(&xi->rx_outstanding);

      curr_nb = next_nb;
    }
    NdisFreeNetBufferList(curr_nbl);
    curr_nbl = next_nbl;
  }

  /* during shutdown, wake the waiter once all rx packets have come back */
  if (!xi->rx_outstanding && xi->device_state != DEVICE_STATE_ACTIVE)
    KeSetEvent(&xi->rx_idle_event, IO_NO_INCREMENT, FALSE);

  //FUNCTION_EXIT();
}
#endif
820 /* We limit the number of packets per interrupt so that acks get a chance
821 under high rx load. The DPC is immediately re-scheduled */
823 #define MAXIMUM_PACKETS_PER_INDICATE 32
825 #define MAXIMUM_PACKETS_PER_INTERRUPT 2560 /* this is calculated before large packet split */
826 #define MAXIMUM_DATA_PER_INTERRUPT (MAXIMUM_PACKETS_PER_INTERRUPT * 1500) /* help account for large packets */
828 // Called at DISPATCH_LEVEL
829 BOOLEAN
830 XenNet_RxBufferCheck(struct xennet_info *xi)
831 {
832 RING_IDX cons, prod;
833 ULONG packet_count = 0;
834 ULONG packet_data = 0;
835 ULONG buffer_count = 0;
836 USHORT id;
837 int more_to_do = FALSE;
838 shared_buffer_t *page_buf;
839 #if NTDDI_VERSION < NTDDI_VISTA
840 PNDIS_PACKET packets[MAXIMUM_PACKETS_PER_INDICATE];
841 PNDIS_PACKET first_header_only_packet;
842 PNDIS_PACKET last_header_only_packet;
843 #else
844 #endif
845 //ULONG nbl_count = 0;
846 ULONG interim_packet_data = 0;
847 struct netif_extra_info *ei;
848 rx_context_t rc;
849 packet_info_t *pi = &xi->rxpi[KeGetCurrentProcessorNumber() & 0xff];
850 shared_buffer_t *head_buf = NULL;
851 shared_buffer_t *tail_buf = NULL;
852 shared_buffer_t *last_buf = NULL;
853 BOOLEAN extra_info_flag = FALSE;
854 BOOLEAN more_data_flag = FALSE;
855 BOOLEAN dont_set_event;
856 //FUNCTION_ENTER();
858 #if NTDDI_VERSION < NTDDI_VISTA
859 rc.first_packet = NULL;
860 rc.last_packet = NULL;
861 rc.packet_count = 0;
862 #else
863 rc.first_nbl = NULL;
864 rc.last_nbl = NULL;
865 rc.packet_count = 0;
866 rc.nbl_count = 0;
867 #endif
869 /* get all the buffers off the ring as quickly as possible so the lock is held for a minimum amount of time */
870 KeAcquireSpinLockAtDpcLevel(&xi->rx_lock);
872 if (xi->device_state != DEVICE_STATE_ACTIVE) {
873 /* there is a chance that our Dpc had been queued just before the shutdown... */
874 KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);
875 return FALSE;
876 }
878 if (xi->rx_partial_buf) {
879 head_buf = xi->rx_partial_buf;
880 tail_buf = xi->rx_partial_buf;
881 while (tail_buf->next)
882 tail_buf = tail_buf->next;
883 more_data_flag = xi->rx_partial_more_data_flag;
884 extra_info_flag = xi->rx_partial_extra_info_flag;
885 xi->rx_partial_buf = NULL;
886 }
888 do {
889 prod = xi->rx_ring.sring->rsp_prod;
890 KeMemoryBarrier(); /* Ensure we see responses up to 'prod'. */
892 for (cons = xi->rx_ring.rsp_cons; cons != prod && packet_count < MAXIMUM_PACKETS_PER_INTERRUPT && packet_data < MAXIMUM_DATA_PER_INTERRUPT; cons++) {
893 id = (USHORT)(cons & (NET_RX_RING_SIZE - 1));
894 page_buf = xi->rx_ring_pbs[id];
895 XN_ASSERT(page_buf);
896 xi->rx_ring_pbs[id] = NULL;
897 xi->rx_id_free++;
898 memcpy(&page_buf->rsp, RING_GET_RESPONSE(&xi->rx_ring, cons), max(sizeof(struct netif_rx_response), sizeof(struct netif_extra_info)));
899 if (!extra_info_flag) {
900 if (page_buf->rsp.status <= 0 || page_buf->rsp.offset + page_buf->rsp.status > PAGE_SIZE) {
901 KdPrint((__DRIVER_NAME " Error: rsp offset %d, size %d\n",
902 page_buf->rsp.offset, page_buf->rsp.status));
903 XN_ASSERT(!extra_info_flag);
904 put_pb_on_freelist(xi, page_buf);
905 continue;
906 }
907 }
909 if (!head_buf) {
910 head_buf = page_buf;
911 tail_buf = page_buf;
912 } else {
913 tail_buf->next = page_buf;
914 tail_buf = page_buf;
915 }
916 page_buf->next = NULL;
918 if (extra_info_flag) {
919 ei = (struct netif_extra_info *)&page_buf->rsp;
920 extra_info_flag = ei->flags & XEN_NETIF_EXTRA_FLAG_MORE;
921 } else {
922 more_data_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_more_data);
923 extra_info_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_extra_info);
924 interim_packet_data += page_buf->rsp.status;
925 }
927 if (!extra_info_flag && !more_data_flag) {
928 last_buf = page_buf;
929 packet_count++;
930 packet_data += interim_packet_data;
931 interim_packet_data = 0;
932 }
933 buffer_count++;
934 }
935 xi->rx_ring.rsp_cons = cons;
937 /* Give netback more buffers */
938 XenNet_FillRing(xi);
940 if (packet_count >= MAXIMUM_PACKETS_PER_INTERRUPT || packet_data >= MAXIMUM_DATA_PER_INTERRUPT)
941 break;
943 more_to_do = RING_HAS_UNCONSUMED_RESPONSES(&xi->rx_ring);
944 if (!more_to_do) {
945 xi->rx_ring.sring->rsp_event = xi->rx_ring.rsp_cons + 1;
946 KeMemoryBarrier();
947 more_to_do = RING_HAS_UNCONSUMED_RESPONSES(&xi->rx_ring);
948 }
949 } while (more_to_do);
951 /* anything past last_buf belongs to an incomplete packet... */
952 if (last_buf && last_buf->next)
953 {
954 KdPrint((__DRIVER_NAME " Partial receive\n"));
955 xi->rx_partial_buf = last_buf->next;
956 xi->rx_partial_more_data_flag = more_data_flag;
957 xi->rx_partial_extra_info_flag = extra_info_flag;
958 last_buf->next = NULL;
959 }
961 KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);
963 if (packet_count >= MAXIMUM_PACKETS_PER_INTERRUPT || packet_data >= MAXIMUM_DATA_PER_INTERRUPT)
964 {
965 /* fire again immediately */
966 KdPrint((__DRIVER_NAME " Dpc Duration Exceeded\n"));
967 /* we want the Dpc on the end of the queue. By definition we are already on the right CPU so we know the Dpc queue will be run immediately */
968 // KeSetImportanceDpc(&xi->rxtx_dpc, MediumImportance);
969 KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
970 /* dont set an event in TX path */
971 dont_set_event = TRUE;
972 }
973 else
974 {
975 /* make sure the Dpc queue is run immediately next interrupt */
976 // KeSetImportanceDpc(&xi->rxtx_dpc, HighImportance);
977 /* set an event in TX path */
978 dont_set_event = FALSE;
979 }
981 /* make packets out of the buffers */
982 page_buf = head_buf;
983 extra_info_flag = FALSE;
984 more_data_flag = FALSE;
986 while (page_buf) {
987 shared_buffer_t *next_buf = page_buf->next;
988 PMDL mdl;
990 page_buf->next = NULL;
991 if (extra_info_flag) {
992 //KdPrint((__DRIVER_NAME " processing extra info\n"));
993 ei = (struct netif_extra_info *)&page_buf->rsp;
994 extra_info_flag = ei->flags & XEN_NETIF_EXTRA_FLAG_MORE;
995 switch (ei->type)
996 {
997 case XEN_NETIF_EXTRA_TYPE_GSO:
998 switch (ei->u.gso.type) {
999 case XEN_NETIF_GSO_TYPE_TCPV4:
1000 pi->mss = ei->u.gso.size;
1001 // TODO - put this assertion somewhere XN_ASSERT(header_len + pi->mss <= PAGE_SIZE); // this limits MTU to PAGE_SIZE - XN_HEADER_LEN
1002 break;
1003 default:
1004 KdPrint((__DRIVER_NAME " Unknown GSO type (%d) detected\n", ei->u.gso.type));
1005 break;
1007 break;
1008 default:
1009 KdPrint((__DRIVER_NAME " Unknown extra info type (%d) detected\n", ei->type));
1010 break;
1012 put_pb_on_freelist(xi, page_buf);
1013 } else {
1014 XN_ASSERT(!page_buf->rsp.offset);
1015 if (!more_data_flag) { // handling the packet's 1st buffer
1016 if (page_buf->rsp.flags & NETRXF_csum_blank)
1017 pi->csum_blank = TRUE;
1018 if (page_buf->rsp.flags & NETRXF_data_validated)
1019 pi->data_validated = TRUE;
1021 mdl = page_buf->mdl;
1022 mdl->ByteCount = page_buf->rsp.status; //NdisAdjustBufferLength(mdl, page_buf->rsp.status);
1023 //KdPrint((__DRIVER_NAME " buffer = %p, pb = %p\n", buffer, page_buf));
1024 if (pi->first_pb) {
1025 XN_ASSERT(pi->curr_pb);
1026 //KdPrint((__DRIVER_NAME " additional buffer\n"));
1027 pi->curr_pb->next = page_buf;
1028 pi->curr_pb = page_buf;
1029 XN_ASSERT(pi->curr_mdl);
1030 pi->curr_mdl->Next = mdl;
1031 pi->curr_mdl = mdl;
1032 } else {
1033 pi->first_pb = page_buf;
1034 pi->curr_pb = page_buf;
1035 pi->first_mdl = mdl;
1036 pi->curr_mdl = mdl;
1038 //pi->mdl_count++;
1039 extra_info_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_extra_info);
1040 more_data_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_more_data);
1041 pi->total_length = pi->total_length + page_buf->rsp.status;
1044 /* Packet done, add it to the list */
1045 if (!more_data_flag && !extra_info_flag) {
1046 pi->curr_pb = pi->first_pb;
1047 pi->curr_mdl = pi->first_mdl;
1048 XenNet_MakePackets(xi, &rc, pi);
1051 page_buf = next_buf;
1053 XN_ASSERT(!more_data_flag && !extra_info_flag);
1055 #if NTDDI_VERSION < NTDDI_VISTA
1056 packet_count = 0;
1057 first_header_only_packet = NULL;
1058 last_header_only_packet = NULL;
1060 while (rc.first_packet) {
1061 PNDIS_PACKET packet;
1062 NDIS_STATUS status;
1064 packet = rc.first_packet;
1065 XN_ASSERT(PACKET_FIRST_PB(packet));
1066 rc.first_packet = PACKET_NEXT_PACKET(packet);
1067 status = NDIS_GET_PACKET_STATUS(packet);
1068 if (status == NDIS_STATUS_RESOURCES) {
1069 if (!first_header_only_packet) {
1070 first_header_only_packet = packet;
1071 } else {
1072 PACKET_NEXT_PACKET(last_header_only_packet) = packet;
1074 last_header_only_packet = packet;
1075 PACKET_NEXT_PACKET(packet) = NULL;
1077 packets[packet_count++] = packet;
1078 /* if we indicate a packet with NDIS_STATUS_RESOURCES then any following packet can't be NDIS_STATUS_SUCCESS */
1079 if (packet_count == MAXIMUM_PACKETS_PER_INDICATE || !rc.first_packet
1080 || (NDIS_GET_PACKET_STATUS(rc.first_packet) == NDIS_STATUS_SUCCESS
1081 && status == NDIS_STATUS_RESOURCES)) {
1082 NdisMIndicateReceivePacket(xi->adapter_handle, packets, packet_count);
1083 packet_count = 0;
1086 /* now return the packets for which we indicated NDIS_STATUS_RESOURCES */
1087 while (first_header_only_packet) {
1088 PNDIS_PACKET packet = first_header_only_packet;
1089 first_header_only_packet = PACKET_NEXT_PACKET(packet);
1090 XenNet_ReturnPacket(xi, packet);
1092 #else
1093 if (rc.first_nbl) {
1094 NdisMIndicateReceiveNetBufferLists(xi->adapter_handle, rc.first_nbl,
1095 NDIS_DEFAULT_PORT_NUMBER, rc.nbl_count,
1096 NDIS_RECEIVE_FLAGS_DISPATCH_LEVEL
1097 //| NDIS_RECEIVE_FLAGS_SINGLE_ETHER_TYPE
1098 | NDIS_RECEIVE_FLAGS_PERFECT_FILTERED);
1100 #endif
1101 //FUNCTION_EXIT();
1102 return dont_set_event;
1105 static VOID
1106 XenNet_BufferFree(xennet_info_t *xi)
1108 shared_buffer_t *sb;
1109 int i;
1111 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1112 if (xi->rx_ring_pbs[i] != NULL) {
1113 put_pb_on_freelist(xi, xi->rx_ring_pbs[i]);
1114 xi->rx_ring_pbs[i] = NULL;
1118 /* because we are shutting down this won't allocate new ones */
1119 while ((sb = get_pb_from_freelist(xi)) != NULL) {
1120 XnEndAccess(xi->handle,
1121 sb->gref, FALSE, (ULONG)'XNRX');
1122 IoFreeMdl(sb->mdl);
1123 ExFreePoolWithTag(sb->virtual, XENNET_POOL_TAG);
1124 ExFreePoolWithTag(sb, XENNET_POOL_TAG);
1126 while ((sb = get_hb_from_freelist(xi)) != NULL) {
1127 IoFreeMdl(sb->mdl);
1128 ExFreePoolWithTag(sb, XENNET_POOL_TAG);
1132 BOOLEAN
1133 XenNet_RxInit(xennet_info_t *xi) {
1134 #if NTDDI_VERSION < NTDDI_VISTA
1135 NDIS_STATUS status;
1136 #else
1137 NET_BUFFER_LIST_POOL_PARAMETERS nbl_pool_parameters;
1138 NET_BUFFER_POOL_PARAMETERS nb_pool_parameters;
1139 #endif
1140 int ret;
1141 int i;
1143 FUNCTION_ENTER();
1145 // this stuff needs to be done once only...
1146 KeInitializeSpinLock(&xi->rx_lock);
1147 KeInitializeEvent(&xi->rx_idle_event, SynchronizationEvent, FALSE);
1148 xi->rxpi = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(packet_info_t) * NdisSystemProcessorCount(), XENNET_POOL_TAG, NormalPoolPriority);
1149 if (!xi->rxpi) {
1150 KdPrint(("ExAllocatePoolWithTagPriority failed\n"));
1151 return FALSE;
1153 NdisZeroMemory(xi->rxpi, sizeof(packet_info_t) * NdisSystemProcessorCount());
1155 ret = stack_new(&xi->rx_pb_stack, NET_RX_RING_SIZE * 4);
1156 if (!ret) {
1157 FUNCTION_MSG("Failed to allocate rx_pb_stack\n");
1158 ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);
1159 return FALSE;
1161 ret = stack_new(&xi->rx_hb_stack, NET_RX_RING_SIZE * 4);
1162 if (!ret) {
1163 FUNCTION_MSG("Failed to allocate rx_hb_stack\n");
1164 stack_delete(xi->rx_pb_stack, NULL, NULL);
1165 ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);
1166 return FALSE;
1169 xi->rx_id_free = NET_RX_RING_SIZE;
1170 xi->rx_outstanding = 0;
1172 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1173 xi->rx_ring_pbs[i] = NULL;
1176 #if NTDDI_VERSION < NTDDI_VISTA
1177 NdisAllocatePacketPool(&status, &xi->rx_packet_pool, NET_RX_RING_SIZE * 4, PROTOCOL_RESERVED_SIZE_IN_PACKET);
1178 if (status != NDIS_STATUS_SUCCESS) {
1179 KdPrint(("NdisAllocatePacketPool failed with 0x%x\n", status));
1180 return FALSE;
1182 #else
1183 nbl_pool_parameters.Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
1184 nbl_pool_parameters.Header.Revision = NET_BUFFER_LIST_POOL_PARAMETERS_REVISION_1;
1185 nbl_pool_parameters.Header.Size = NDIS_SIZEOF_NET_BUFFER_LIST_POOL_PARAMETERS_REVISION_1;
1186 nbl_pool_parameters.ProtocolId = NDIS_PROTOCOL_ID_DEFAULT;
1187 nbl_pool_parameters.fAllocateNetBuffer = FALSE;
1188 nbl_pool_parameters.ContextSize = 0;
1189 nbl_pool_parameters.PoolTag = XENNET_POOL_TAG;
1190 nbl_pool_parameters.DataSize = 0; /* NET_BUFFERS are always allocated separately */
1192 xi->rx_nbl_pool = NdisAllocateNetBufferListPool(xi->adapter_handle, &nbl_pool_parameters);
1193 if (!xi->rx_nbl_pool) {
1194 KdPrint(("NdisAllocateNetBufferListPool failed\n"));
1195 return FALSE;
1198 nb_pool_parameters.Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
1199 nb_pool_parameters.Header.Revision = NET_BUFFER_POOL_PARAMETERS_REVISION_1;
1200 nb_pool_parameters.Header.Size = NDIS_SIZEOF_NET_BUFFER_POOL_PARAMETERS_REVISION_1;
1201 nb_pool_parameters.PoolTag = XENNET_POOL_TAG;
1202 nb_pool_parameters.DataSize = 0; /* the buffers come from the ring */
1203 xi->rx_packet_pool = NdisAllocateNetBufferPool(xi->adapter_handle, &nb_pool_parameters);
1204 if (!xi->rx_packet_pool) {
1205 KdPrint(("NdisAllocateNetBufferPool (rx_packet_pool) failed\n"));
1206 return FALSE;
1208 #endif
1209 XenNet_FillRing(xi);
1211 FUNCTION_EXIT();
1213 return TRUE;
/* Tear down the receive path: wait (dropping the lock while waiting) until
 * every indicated packet has been returned, then free all RX buffers, the
 * freelist stacks, the per-processor packet_info_t array and the NDIS
 * packet/NBL pools. NOTE: this view ends at the final `return;` — the
 * function's closing brace lies past the visible region. */
1216 VOID
1217 XenNet_RxShutdown(xennet_info_t *xi) {
1218 KIRQL old_irql;
/* NOTE(review): xi IS referenced throughout this function, so this macro
 * invocation looks vestigial/contradictory — confirm and consider removing */
1219 UNREFERENCED_PARAMETER(xi);
1221 FUNCTION_ENTER();
/* spin until rx_outstanding drains; the returning path is expected to set
 * rx_idle_event — wait is done with the lock released to avoid deadlock */
1223 KeAcquireSpinLock(&xi->rx_lock, &old_irql);
1224 while (xi->rx_outstanding) {
1225 FUNCTION_MSG("Waiting for %d packets to be returned\n", xi->rx_outstanding);
1226 KeReleaseSpinLock(&xi->rx_lock, old_irql);
1227 KeWaitForSingleObject(&xi->rx_idle_event, Executive, KernelMode, FALSE, NULL);
1228 KeAcquireSpinLock(&xi->rx_lock, &old_irql);
1230 KeReleaseSpinLock(&xi->rx_lock, old_irql);
/* all packets back: release buffers, then the structures that track them */
1232 XenNet_BufferFree(xi);
1234 stack_delete(xi->rx_pb_stack, NULL, NULL);
1235 stack_delete(xi->rx_hb_stack, NULL, NULL);
1238 ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);
/* pool type differs by NDIS version, matching the allocation in XenNet_RxInit */
1240 #if NTDDI_VERSION < NTDDI_VISTA
1241 NdisFreePacketPool(xi->rx_packet_pool);
1242 #else
1243 NdisFreeNetBufferPool(xi->rx_packet_pool);
1244 NdisFreeNetBufferListPool(xi->rx_nbl_pool);
1245 #endif
1247 FUNCTION_EXIT();
1248 return;