win-pvdrivers

view xennet/xennet_rx.c @ 1033:cb767700f91c

Correctly initialise pi values and set header size based on lookahead
author James Harper <james.harper@bendigoit.com.au>
date Sun Mar 03 13:49:54 2013 +1100 (2013-03-03)
parents 1ce315b193d1
children fba0ce4d9e54
line source
1 /*
2 PV Net Driver for Windows Xen HVM Domains
3 Copyright (C) 2007 James Harper
4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
21 #include "xennet.h"
23 static __inline shared_buffer_t *
24 get_pb_from_freelist(struct xennet_info *xi)
25 {
26 shared_buffer_t *pb;
27 PVOID ptr_ref;
29 if (stack_pop(xi->rx_pb_stack, &ptr_ref))
30 {
31 pb = ptr_ref;
32 pb->ref_count = 1;
33 InterlockedDecrement(&xi->rx_pb_free);
34 return pb;
35 }
37 /* don't allocate a new one if we are shutting down */
38 if (xi->device_state != DEVICE_STATE_INITIALISING && xi->device_state != DEVICE_STATE_ACTIVE)
39 return NULL;
41 pb = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(shared_buffer_t), XENNET_POOL_TAG, LowPoolPriority);
42 if (!pb)
43 return NULL;
44 pb->virtual = ExAllocatePoolWithTagPriority(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG, LowPoolPriority);
45 if (!pb->virtual)
46 {
47 ExFreePoolWithTag(pb, XENNET_POOL_TAG);
48 return NULL;
49 }
50 pb->mdl = IoAllocateMdl(pb->virtual, PAGE_SIZE, FALSE, FALSE, NULL);
51 if (!pb->mdl)
52 {
53 ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
54 ExFreePoolWithTag(pb, XENNET_POOL_TAG);
55 return NULL;
56 }
57 pb->gref = (grant_ref_t)XnGrantAccess(xi->handle,
58 (ULONG)(MmGetPhysicalAddress(pb->virtual).QuadPart >> PAGE_SHIFT), FALSE, INVALID_GRANT_REF, (ULONG)'XNRX');
59 if (pb->gref == INVALID_GRANT_REF)
60 {
61 IoFreeMdl(pb->mdl);
62 ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
63 ExFreePoolWithTag(pb, XENNET_POOL_TAG);
64 return NULL;
65 }
66 MmBuildMdlForNonPagedPool(pb->mdl);
67 pb->ref_count = 1;
68 return pb;
69 }
71 static __inline VOID
72 ref_pb(struct xennet_info *xi, shared_buffer_t *pb)
73 {
74 UNREFERENCED_PARAMETER(xi);
75 InterlockedIncrement(&pb->ref_count);
76 }
78 static __inline VOID
79 put_pb_on_freelist(struct xennet_info *xi, shared_buffer_t *pb)
80 {
81 if (InterlockedDecrement(&pb->ref_count) == 0)
82 {
83 //NdisAdjustBufferLength(pb->buffer, PAGE_SIZE);
84 //NDIS_BUFFER_LINKAGE(pb->buffer) = NULL;
85 if (xi->rx_pb_free > RX_MAX_PB_FREELIST)
86 {
87 XnEndAccess(xi->handle, pb->gref, FALSE, (ULONG)'XNRX');
88 IoFreeMdl(pb->mdl);
89 ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
90 ExFreePoolWithTag(pb, XENNET_POOL_TAG);
91 return;
92 }
93 pb->mdl->ByteCount = PAGE_SIZE;
94 pb->mdl->Next = NULL;
95 pb->next = NULL;
96 stack_push(xi->rx_pb_stack, pb);
97 InterlockedIncrement(&xi->rx_pb_free);
98 }
99 }
101 static __inline shared_buffer_t *
102 get_hb_from_freelist(struct xennet_info *xi)
103 {
104 shared_buffer_t *hb;
105 PVOID ptr_ref;
107 if (stack_pop(xi->rx_hb_stack, &ptr_ref))
108 {
109 hb = ptr_ref;
110 InterlockedDecrement(&xi->rx_hb_free);
111 return hb;
112 }
114 /* don't allocate a new one if we are shutting down */
115 if (xi->device_state != DEVICE_STATE_INITIALISING && xi->device_state != DEVICE_STATE_ACTIVE)
116 return NULL;
118 hb = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(shared_buffer_t) + MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH, XENNET_POOL_TAG, LowPoolPriority);
119 if (!hb)
120 return NULL;
121 NdisZeroMemory(hb, sizeof(shared_buffer_t));
122 hb->mdl = IoAllocateMdl(hb + 1, MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH, FALSE, FALSE, NULL);
123 if (!hb->mdl) {
124 ExFreePoolWithTag(hb, XENNET_POOL_TAG);
125 return NULL;
126 }
127 MmBuildMdlForNonPagedPool(hb->mdl);
128 return hb;
129 }
131 static __inline VOID
132 put_hb_on_freelist(struct xennet_info *xi, shared_buffer_t *hb)
133 {
134 XN_ASSERT(xi);
135 hb->mdl->ByteCount = sizeof(shared_buffer_t) + MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH;
136 hb->mdl->Next = NULL;
137 hb->next = NULL;
138 stack_push(xi->rx_hb_stack, hb);
139 InterlockedIncrement(&xi->rx_hb_free);
140 }
142 // Called at DISPATCH_LEVEL with rx lock held
143 static VOID
144 XenNet_FillRing(struct xennet_info *xi)
145 {
146 unsigned short id;
147 shared_buffer_t *page_buf;
148 ULONG i, notify;
149 ULONG batch_target;
150 RING_IDX req_prod = xi->rx_ring.req_prod_pvt;
151 netif_rx_request_t *req;
153 //FUNCTION_ENTER();
155 if (xi->device_state != DEVICE_STATE_ACTIVE)
156 return;
158 batch_target = xi->rx_target - (req_prod - xi->rx_ring.rsp_cons);
160 if (batch_target < (xi->rx_target >> 2)) {
161 //FUNCTION_EXIT();
162 return; /* only refill if we are less than 3/4 full already */
163 }
165 for (i = 0; i < batch_target; i++) {
166 page_buf = get_pb_from_freelist(xi);
167 if (!page_buf) {
168 KdPrint((__DRIVER_NAME " Added %d out of %d buffers to rx ring (no free pages)\n", i, batch_target));
169 break;
170 }
171 xi->rx_id_free--;
173 /* Give to netback */
174 id = (USHORT)((req_prod + i) & (NET_RX_RING_SIZE - 1));
175 XN_ASSERT(xi->rx_ring_pbs[id] == NULL);
176 xi->rx_ring_pbs[id] = page_buf;
177 req = RING_GET_REQUEST(&xi->rx_ring, req_prod + i);
178 req->id = id;
179 req->gref = page_buf->gref;
180 XN_ASSERT(req->gref != INVALID_GRANT_REF);
181 }
182 KeMemoryBarrier();
183 xi->rx_ring.req_prod_pvt = req_prod + i;
184 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->rx_ring, notify);
185 if (notify) {
186 XnNotify(xi->handle, xi->event_channel);
187 }
189 //FUNCTION_EXIT();
191 return;
192 }
/* Per-DPC receive context: accumulates the chain of packets built during one
 * pass of the rx path so they can be indicated to NDIS in a single batch.
 * The NDIS5 build chains NDIS_PACKETs; the NDIS6 build chains
 * NET_BUFFER_LISTs and additionally counts NBLs separately from packets. */
194 #if NTDDI_VERSION < NTDDI_VISTA
195 typedef struct {
/* head of the packet chain built this pass (NULL when empty) */
196 PNDIS_PACKET first_packet;
/* tail of the chain - new packets are linked after this one */
197 PNDIS_PACKET last_packet;
198 ULONG packet_count;
199 } rx_context_t;
200 #else
201 typedef struct {
/* head of the NET_BUFFER_LIST chain built this pass (NULL when empty) */
202 PNET_BUFFER_LIST first_nbl;
/* tail of the chain - new NBLs are linked after this one */
203 PNET_BUFFER_LIST last_nbl;
204 ULONG packet_count;
205 ULONG nbl_count;
206 } rx_context_t;
207 #endif
209 #if NTDDI_VERSION < NTDDI_VISTA
210 /*
211 NDIS5 appears to insist that the checksum on received packets is correct, and won't
212 believe us when we lie about it, which happens when the packet is generated on the
213 same bridge in Dom0. Doh!
214 This is only for TCP and UDP packets. IP checksums appear to be correct anyways.
215 */
/* Compute the TCP (proto 6) or UDP (proto 17) checksum of an IPv4 packet by
 * walking all of the packet's NDIS buffers, handling payload bytes that
 * straddle buffer boundaries.
 * If set_csum is TRUE the computed value is written into the checksum field
 * and TRUE is returned; if FALSE the computed value is compared against the
 * field and the comparison result is returned.
 * Returns FALSE on malformed input (no first buffer, IP length larger than
 * the packet, or an unsupported protocol).
 * NDIS5 only - see the comment above: NDIS5 insists receive checksums be
 * correct, so blank checksums from the backend must be filled in here. */
217 static BOOLEAN
218 XenNet_SumPacketData(
219 packet_info_t *pi,
220 PNDIS_PACKET packet,
221 BOOLEAN set_csum) {
222 USHORT i;
223 PUCHAR buffer;
224 PMDL mdl;
225 UINT total_length;
226 UINT data_length;
227 UINT buffer_length;
228 USHORT buffer_offset;
229 ULONG csum;
230 PUSHORT csum_ptr;
231 USHORT remaining;
232 USHORT ip4_length;
233 BOOLEAN csum_span = TRUE; /* when the USHORT to be checksummed spans a buffer */
235 //FUNCTION_ENTER();
237 NdisGetFirstBufferFromPacketSafe(packet, &mdl, &buffer, &buffer_length, &total_length, NormalPagePriority);
238 if (!buffer) {
239 FUNCTION_MSG("NdisGetFirstBufferFromPacketSafe failed, buffer == NULL\n");
240 return FALSE;
241 }
242 XN_ASSERT(mdl);
/* IPv4 total-length field lives 2 bytes into the IP header */
244 ip4_length = GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 2]);
245 data_length = ip4_length + XN_HDR_SIZE;
247 if ((USHORT)data_length > total_length) {
248 FUNCTION_MSG("Size Mismatch %d (ip4_length + XN_HDR_SIZE) != %d (total_length)\n", ip4_length + XN_HDR_SIZE, total_length);
249 return FALSE;
250 }
/* locate the L4 checksum field: offset 16 in the TCP header, 6 in the UDP
 * header - both asserted to be fully inside the first buffer */
252 switch (pi->ip_proto) {
253 case 6:
254 XN_ASSERT(buffer_length >= (USHORT)(XN_HDR_SIZE + pi->ip4_header_length + 17));
255 csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + pi->ip4_header_length + 16];
256 break;
257 case 17:
258 XN_ASSERT(buffer_length >= (USHORT)(XN_HDR_SIZE + pi->ip4_header_length + 7));
259 csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + pi->ip4_header_length + 6];
260 break;
261 default:
262 KdPrint((__DRIVER_NAME " Don't know how to calc sum for IP Proto %d\n", pi->ip_proto));
263 //FUNCTION_EXIT();
264 return FALSE; // should never happen
265 }
267 if (set_csum)
268 *csum_ptr = 0;
/* pseudo-header: source addr, dest addr, protocol byte and L4 length */
270 csum = 0;
271 csum += GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 12]) + GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 14]); // src
272 csum += GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 16]) + GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 18]); // dst
273 csum += ((USHORT)buffer[XN_HDR_SIZE + 9]);
275 remaining = ip4_length - pi->ip4_header_length;
277 csum += remaining;
/* sum the L4 header + payload 16 bits at a time, carrying half-words
 * across buffer boundaries via csum_span */
279 csum_span = FALSE;
280 buffer_offset = i = XN_HDR_SIZE + pi->ip4_header_length;
281 while (i < data_length) {
282 /* don't include the checksum field itself in the calculation */
283 if ((pi->ip_proto == 6 && i == XN_HDR_SIZE + pi->ip4_header_length + 16) || (pi->ip_proto == 17 && i == XN_HDR_SIZE + pi->ip4_header_length + 6)) {
284 /* we know that this always happens in the header buffer so we are guaranteed the full two bytes */
285 i += 2;
286 buffer_offset += 2;
287 continue;
288 }
289 if (csum_span) {
290 /* the other half of the next bit */
291 XN_ASSERT(buffer_offset == 0);
292 csum += (USHORT)buffer[buffer_offset];
293 csum_span = FALSE;
294 i += 1;
295 buffer_offset += 1;
296 } else if (buffer_offset == buffer_length - 1) {
297 /* deal with a buffer ending on an odd byte boundary */
298 csum += (USHORT)buffer[buffer_offset] << 8;
299 csum_span = TRUE;
300 i += 1;
301 buffer_offset += 1;
302 } else {
303 csum += GET_NET_PUSHORT(&buffer[buffer_offset]);
304 i += 2;
305 buffer_offset += 2;
306 }
/* advance to the next NDIS buffer when the current one is exhausted */
307 if (buffer_offset == buffer_length && i < total_length) {
308 NdisGetNextBuffer(mdl, &mdl);
309 if (mdl == NULL) {
310 KdPrint((__DRIVER_NAME " Ran out of buffers\n"));
311 return FALSE; // should never happen
312 }
313 NdisQueryBufferSafe(mdl, &buffer, &buffer_length, NormalPagePriority);
314 XN_ASSERT(buffer_length);
315 buffer_offset = 0;
316 }
317 }
/* fold carries back into the low 16 bits (ones-complement sum) */
319 while (csum & 0xFFFF0000)
320 csum = (csum & 0xFFFF) + (csum >> 16);
322 if (set_csum) {
323 *csum_ptr = (USHORT)~GET_NET_USHORT((USHORT)csum);
324 } else {
325 return (BOOLEAN)(*csum_ptr == (USHORT)~GET_NET_USHORT((USHORT)csum));
326 }
327 return TRUE;
328 }
329 #endif
331 static BOOLEAN
332 XenNet_MakePacket(struct xennet_info *xi, rx_context_t *rc, packet_info_t *pi) {
333 #if NTDDI_VERSION < NTDDI_VISTA
334 NDIS_STATUS status;
335 PNDIS_PACKET packet;
336 #else
337 PNET_BUFFER_LIST nbl;
338 PNET_BUFFER packet;
339 #endif
340 PMDL mdl_head, mdl_tail, curr_mdl;
341 PUCHAR header_va;
342 ULONG out_remaining;
343 ULONG header_extra;
344 shared_buffer_t *header_buf;
345 #if NTDDI_VERSION < NTDDI_VISTA
346 PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
347 UINT packet_length;
348 #else
349 NDIS_TCP_IP_CHECKSUM_NET_BUFFER_LIST_INFO csum_info;
350 #endif
351 //FUNCTION_ENTER();
353 #if NTDDI_VERSION < NTDDI_VISTA
354 NdisAllocatePacket(&status, &packet, xi->rx_packet_pool);
355 if (status != NDIS_STATUS_SUCCESS) {
356 FUNCTION_MSG("No free packets\n");
357 return FALSE;
358 }
359 NdisZeroMemory(packet->MiniportReservedEx, sizeof(packet->MiniportReservedEx));
360 NDIS_SET_PACKET_HEADER_SIZE(packet, XN_HDR_SIZE);
361 #else
362 nbl = NdisAllocateNetBufferList(xi->rx_nbl_pool, 0, 0);
363 if (!nbl) {
364 /* buffers will be freed in MakePackets */
365 KdPrint((__DRIVER_NAME " No free nbls\n"));
366 //FUNCTION_EXIT();
367 return FALSE;
368 }
370 packet = NdisAllocateNetBuffer(xi->rx_packet_pool, NULL, 0, 0);
371 if (!packet) {
372 KdPrint((__DRIVER_NAME " No free packets\n"));
373 NdisFreeNetBufferList(nbl);
374 //FUNCTION_EXIT();
375 return FALSE;
376 }
377 #endif
379 if (!pi->first_mdl->Next && !pi->split_required) {
380 /* a single buffer <= MTU */
381 header_buf = NULL;
382 XenNet_BuildHeader(pi, pi->first_mdl_virtual, pi->first_mdl_length);
383 #if NTDDI_VERSION < NTDDI_VISTA
384 NdisChainBufferAtBack(packet, pi->first_mdl);
385 PACKET_FIRST_PB(packet) = pi->first_pb;
386 #else
387 NET_BUFFER_FIRST_MDL(packet) = pi->first_mdl;
388 NET_BUFFER_CURRENT_MDL(packet) = pi->first_mdl;
389 NET_BUFFER_CURRENT_MDL_OFFSET(packet) = 0;
390 NET_BUFFER_DATA_OFFSET(packet) = 0;
391 NET_BUFFER_DATA_LENGTH(packet) = pi->total_length;
392 NB_FIRST_PB(packet) = pi->first_pb;
393 #endif
394 ref_pb(xi, pi->first_pb);
395 } else {
396 XN_ASSERT(ndis_os_minor_version >= 1);
397 header_buf = get_hb_from_freelist(xi);
398 if (!header_buf) {
399 FUNCTION_MSG("No free header buffers\n");
400 #if NTDDI_VERSION < NTDDI_VISTA
401 NdisUnchainBufferAtFront(packet, &curr_mdl);
402 NdisFreePacket(packet);
403 #else
404 NdisFreeNetBufferList(nbl);
405 NdisFreeNetBuffer(packet);
406 #endif
407 return FALSE;
408 }
409 header_va = (PUCHAR)(header_buf + 1);
410 NdisMoveMemory(header_va, pi->header, pi->header_length);
411 //if (pi->ip_proto == 50) {
412 // FUNCTION_MSG("header_length = %d, current_lookahead = %d\n", pi->header_length, xi->current_lookahead);
413 // FUNCTION_MSG("ip4_header_length = %d\n", pi->ip4_header_length);
414 // FUNCTION_MSG("tcp_header_length = %d\n", pi->tcp_header_length);
415 //}
416 /* make sure only the header is in the first buffer (or the entire packet, but that is done in the above case) */
417 XenNet_BuildHeader(pi, header_va, MAX_ETH_HEADER_LENGTH + pi->ip4_header_length + pi->tcp_header_length);
418 header_extra = pi->header_length - (MAX_ETH_HEADER_LENGTH + pi->ip4_header_length + pi->tcp_header_length);
419 XN_ASSERT(pi->header_length <= MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH);
420 header_buf->mdl->ByteCount = pi->header_length;
421 mdl_head = mdl_tail = curr_mdl = header_buf->mdl;
422 #if NTDDI_VERSION < NTDDI_VISTA
423 PACKET_FIRST_PB(packet) = header_buf;
424 header_buf->next = pi->curr_pb;
425 NdisChainBufferAtBack(packet, mdl_head);
426 #else
427 NB_FIRST_PB(packet) = header_buf;
428 header_buf->next = pi->curr_pb;
429 NET_BUFFER_FIRST_MDL(packet) = mdl_head;
430 NET_BUFFER_CURRENT_MDL(packet) = mdl_head;
431 NET_BUFFER_CURRENT_MDL_OFFSET(packet) = 0;
432 NET_BUFFER_DATA_OFFSET(packet) = 0;
433 NET_BUFFER_DATA_LENGTH(packet) = pi->header_length;
434 #endif
436 if (pi->split_required) {
437 /* must be ip4 */
438 ULONG tcp_length;
439 USHORT new_ip4_length;
440 tcp_length = (USHORT)min(pi->mss, pi->tcp_remaining);
441 new_ip4_length = (USHORT)(pi->ip4_header_length + pi->tcp_header_length + tcp_length);
442 SET_NET_USHORT(&header_va[XN_HDR_SIZE + 2], new_ip4_length);
443 SET_NET_ULONG(&header_va[XN_HDR_SIZE + pi->ip4_header_length + 4], pi->tcp_seq);
444 pi->tcp_seq += tcp_length;
445 pi->tcp_remaining = (USHORT)(pi->tcp_remaining - tcp_length);
446 /* part of the packet is already present in the header buffer for lookahead */
447 out_remaining = tcp_length - header_extra;
448 XN_ASSERT((LONG)out_remaining >= 0);
449 } else {
450 out_remaining = pi->total_length - pi->header_length;
451 XN_ASSERT((LONG)out_remaining >= 0);
452 }
454 while (out_remaining != 0) {
455 //ULONG in_buffer_offset;
456 ULONG in_buffer_length;
457 ULONG out_length;
459 //if (pi->ip_proto == 50) {
460 // FUNCTION_MSG("in loop - out_remaining = %d, curr_buffer = %p, curr_pb = %p\n", out_remaining, pi->curr_mdl, pi->curr_pb);
461 //}
462 if (!pi->curr_mdl || !pi->curr_pb) {
463 KdPrint((__DRIVER_NAME " out of buffers for packet\n"));
464 //KdPrint((__DRIVER_NAME " out_remaining = %d, curr_buffer = %p, curr_pb = %p\n", out_remaining, pi->curr_mdl, pi->curr_pb));
465 // TODO: free some stuff or we'll leak
466 /* unchain buffers then free packet */
467 //FUNCTION_EXIT();
468 return FALSE;
469 }
471 in_buffer_length = MmGetMdlByteCount(pi->curr_mdl);
472 out_length = min(out_remaining, in_buffer_length - pi->curr_mdl_offset);
473 curr_mdl = IoAllocateMdl((PUCHAR)MmGetMdlVirtualAddress(pi->curr_mdl) + pi->curr_mdl_offset, out_length, FALSE, FALSE, NULL);
474 XN_ASSERT(curr_mdl);
475 IoBuildPartialMdl(pi->curr_mdl, curr_mdl, (PUCHAR)MmGetMdlVirtualAddress(pi->curr_mdl) + pi->curr_mdl_offset, out_length);
476 mdl_tail->Next = curr_mdl;
477 mdl_tail = curr_mdl;
478 curr_mdl->Next = NULL; /* I think this might be redundant */
479 #if NTDDI_VERSION < NTDDI_VISTA
480 #else
481 NET_BUFFER_DATA_LENGTH(packet) += out_length;
482 #endif
483 ref_pb(xi, pi->curr_pb);
484 pi->curr_mdl_offset = (USHORT)(pi->curr_mdl_offset + out_length);
485 if (pi->curr_mdl_offset == in_buffer_length) {
486 pi->curr_mdl = pi->curr_mdl->Next;
487 pi->curr_pb = pi->curr_pb->next;
488 pi->curr_mdl_offset = 0;
489 }
490 out_remaining -= out_length;
491 }
492 if (pi->split_required) {
493 // TODO: only if Ip checksum is disabled...
494 //XenNet_SumIpHeader(header_va, pi->ip4_header_length);
495 }
496 if (header_extra > 0)
497 pi->header_length -= header_extra;
498 }
500 rc->packet_count++;
501 #if NTDDI_VERSION < NTDDI_VISTA
502 #else
503 NET_BUFFER_LIST_FIRST_NB(nbl) = packet;
504 #endif
506 if (pi->parse_result == PARSE_OK) {
507 #if NTDDI_VERSION < NTDDI_VISTA
508 csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
509 packet, TcpIpChecksumPacketInfo);
510 XN_ASSERT(csum_info->Value == 0);
511 if (pi->csum_blank || pi->data_validated) {
512 BOOLEAN checksum_offload = FALSE;
513 /* we know this is IPv4, and we know Linux always validates the IPv4 checksum for us */
514 if (xi->setting_csum.V4Receive.IpChecksum) {
515 if (!pi->ip_has_options || xi->setting_csum.V4Receive.IpOptionsSupported) {
516 if (XenNet_CheckIpHeaderSum(pi->header, pi->ip4_header_length))
517 csum_info->Receive.NdisPacketIpChecksumSucceeded = TRUE;
518 else
519 csum_info->Receive.NdisPacketIpChecksumFailed = TRUE;
520 }
521 }
522 if (xi->setting_csum.V4Receive.TcpChecksum && pi->ip_proto == 6) {
523 if (!pi->tcp_has_options || xi->setting_csum.V4Receive.TcpOptionsSupported) {
524 csum_info->Receive.NdisPacketTcpChecksumSucceeded = TRUE;
525 checksum_offload = TRUE;
526 }
527 } else if (xi->setting_csum.V4Receive.UdpChecksum && pi->ip_proto == 17) {
528 csum_info->Receive.NdisPacketUdpChecksumSucceeded = TRUE;
529 checksum_offload = TRUE;
530 }
531 if (pi->csum_blank && (!xi->config_csum_rx_dont_fix || !checksum_offload)) {
532 XenNet_SumPacketData(pi, packet, TRUE);
533 }
534 } else if (xi->config_csum_rx_check && pi->ip_version == 4) {
535 if (xi->setting_csum.V4Receive.IpChecksum) {
536 if (!pi->ip_has_options || xi->setting_csum.V4Receive.IpOptionsSupported) {
537 if (XenNet_CheckIpHeaderSum(pi->header, pi->ip4_header_length))
538 csum_info->Receive.NdisPacketIpChecksumSucceeded = TRUE;
539 else
540 csum_info->Receive.NdisPacketIpChecksumFailed = TRUE;
541 }
542 }
543 if (xi->setting_csum.V4Receive.TcpChecksum && pi->ip_proto == 6) {
544 if (!pi->tcp_has_options || xi->setting_csum.V4Receive.TcpOptionsSupported) {
545 if (XenNet_SumPacketData(pi, packet, FALSE)) {
546 csum_info->Receive.NdisPacketTcpChecksumSucceeded = TRUE;
547 } else {
548 csum_info->Receive.NdisPacketTcpChecksumFailed = TRUE;
549 }
550 }
551 } else if (xi->setting_csum.V4Receive.UdpChecksum && pi->ip_proto == 17) {
552 if (XenNet_SumPacketData(pi, packet, FALSE)) {
553 csum_info->Receive.NdisPacketUdpChecksumSucceeded = TRUE;
554 } else {
555 csum_info->Receive.NdisPacketUdpChecksumFailed = TRUE;
556 }
557 }
558 }
559 #else
560 csum_info.Value = 0;
561 if (pi->csum_blank || pi->data_validated || pi->mss) {
562 if (pi->ip_proto == 6) {
563 csum_info.Receive.IpChecksumSucceeded = TRUE;
564 csum_info.Receive.TcpChecksumSucceeded = TRUE;
565 } else if (pi->ip_proto == 17) {
566 csum_info.Receive.IpChecksumSucceeded = TRUE;
567 csum_info.Receive.UdpChecksumSucceeded = TRUE;
568 }
569 }
570 NET_BUFFER_LIST_INFO(nbl, TcpIpChecksumNetBufferListInfo) = csum_info.Value;
571 #endif
572 }
574 #if NTDDI_VERSION < NTDDI_VISTA
575 if (!rc->first_packet) {
576 rc->first_packet = packet;
577 } else {
578 PACKET_NEXT_PACKET(rc->last_packet) = packet;
579 }
580 rc->last_packet = packet;
581 rc->packet_count++;
582 #else
583 if (!rc->first_nbl) {
584 rc->first_nbl = nbl;
585 } else {
586 NET_BUFFER_LIST_NEXT_NBL(rc->last_nbl) = nbl;
587 }
588 rc->last_nbl = nbl;
589 NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL;
590 rc->nbl_count++;
591 if (pi->is_multicast) {
592 /* multicast */
593 xi->stats.ifHCInMulticastPkts++;
594 xi->stats.ifHCInMulticastOctets += NET_BUFFER_DATA_LENGTH(packet);
595 } else if (pi->is_broadcast) {
596 /* broadcast */
597 xi->stats.ifHCInBroadcastPkts++;
598 xi->stats.ifHCInBroadcastOctets += NET_BUFFER_DATA_LENGTH(packet);
599 } else {
600 /* unicast */
601 xi->stats.ifHCInUcastPkts++;
602 xi->stats.ifHCInUcastOctets += NET_BUFFER_DATA_LENGTH(packet);
603 }
604 #endif
606 #if NTDDI_VERSION < NTDDI_VISTA
607 /* windows gets lazy about ack packets and holds on to them forever under high load situations. we don't like this */
608 NdisQueryPacketLength(packet, &packet_length);
609 if (pi->ip_proto == 6 && packet_length <= NDIS_STATUS_RESOURCES_MAX_LENGTH)
610 NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_RESOURCES);
611 else
612 NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
613 #endif
614 //FUNCTION_EXIT();
616 InterlockedIncrement(&xi->rx_outstanding);
617 //FUNCTION_EXIT();
618 return TRUE;
619 }
621 static VOID
622 XenNet_MakePackets(struct xennet_info *xi, rx_context_t *rc, packet_info_t *pi)
623 {
624 UCHAR psh;
625 shared_buffer_t *page_buf;
627 XenNet_ParsePacketHeader(pi, NULL, XN_HDR_SIZE + xi->current_lookahead);
629 if (!XenNet_FilterAcceptPacket(xi, pi)) {
630 goto done;
631 }
633 if (pi->split_required) {
634 #if NTDDI_VERSION < NTDDI_VISTA
635 /* need to split to mss for NDIS5 */
636 #else
637 switch (xi->current_gso_rx_split_type) {
638 case RX_LSO_SPLIT_HALF:
639 pi->mss = max((pi->tcp_length + 1) / 2, pi->mss);
640 break;
641 case RX_LSO_SPLIT_NONE:
642 pi->mss = 65535;
643 break;
644 }
645 #endif
646 }
648 switch (pi->ip_proto) {
649 case 6: // TCP
650 if (pi->split_required)
651 break;
652 /* fall through */
653 case 17: // UDP
654 if (!XenNet_MakePacket(xi, rc, pi)) {
655 FUNCTION_MSG("Failed to make packet\n");
656 #if NTDDI_VERSION < NTDDI_VISTA
657 xi->stat_rx_no_buffer++;
658 #else
659 xi->stats.ifInDiscards++;
660 #endif
661 goto done;
662 }
663 goto done;
664 default:
665 if (!XenNet_MakePacket(xi, rc, pi)) {
666 FUNCTION_MSG("Failed to make packet\n");
667 #if NTDDI_VERSION < NTDDI_VISTA
668 xi->stat_rx_no_buffer++;
669 #else
670 xi->stats.ifInDiscards++;
671 #endif
672 goto done;
673 }
674 goto done;
675 }
677 /* this is the split_required code */
678 pi->tcp_remaining = pi->tcp_length;
680 /* we can make certain assumptions here as the following code is only for tcp4 */
681 psh = pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] & 8;
682 while (pi->tcp_remaining) {
683 if (!XenNet_MakePacket(xi, rc, pi)) {
684 FUNCTION_MSG("Failed to make packet\n");
685 #if NTDDI_VERSION < NTDDI_VISTA
686 xi->stat_rx_no_buffer++;
687 #else
688 xi->stats.ifInDiscards++;
689 #endif
690 break; /* we are out of memory - just drop the packets */
691 }
692 if (psh) {
693 if (pi->tcp_remaining)
694 pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] &= ~8;
695 else
696 pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] |= 8;
697 }
698 //XenNet_SumPacketData(pi, packet, TRUE);
699 //entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
700 //InsertTailList(rx_packet_list, entry);
701 }
702 done:
703 page_buf = pi->first_pb;
704 while (page_buf) {
705 shared_buffer_t *next_pb = page_buf->next;
706 put_pb_on_freelist(xi, page_buf); /* this doesn't actually free the page_puf if there are outstanding references */
707 page_buf = next_pb;
708 }
709 XenNet_ClearPacketInfo(pi);
710 //FUNCTION_EXIT();
711 return;
712 }
714 #if NTDDI_VERSION < NTDDI_VISTA
715 /* called at DISPATCH_LEVEL */
716 /* it's okay for return packet to be called while resume_state != RUNNING as the packet will simply be added back to the freelist, the grants will be fixed later */
/* NDIS5 return-packet handler: walk the packet's buffer chain in lock-step
 * with its shared_buffer_t chain (head saved in PACKET_FIRST_PB), returning
 * each header buffer or page buffer to its freelist, then free the packet
 * itself. Signals rx_idle_event when the last outstanding rx packet comes
 * back during shutdown. Called at DISPATCH_LEVEL. */
717 VOID
718 XenNet_ReturnPacket(NDIS_HANDLE adapter_context, PNDIS_PACKET packet) {
719 struct xennet_info *xi = adapter_context;
720 PNDIS_BUFFER buffer;
721 shared_buffer_t *page_buf = PACKET_FIRST_PB(packet);
723 //FUNCTION_ENTER();
724 NdisUnchainBufferAtFront(packet, &buffer);
726 while (buffer) {
727 shared_buffer_t *next_buf;
/* each NDIS buffer must have a matching shared_buffer_t in the chain */
728 XN_ASSERT(page_buf);
729 next_buf = page_buf->next;
730 if (!page_buf->virtual) {
731 /* this is a hb not a pb because virtual is NULL (virtual is just the memory after the hb */
732 put_hb_on_freelist(xi, (shared_buffer_t *)MmGetMdlVirtualAddress(buffer) - 1);
733 } else {
/* partial-MDL copies made in MakePacket are freed; the pb's own mdl is kept with it */
734 if (buffer != page_buf->mdl)
735 NdisFreeBuffer(buffer);
736 put_pb_on_freelist(xi, page_buf);
737 }
738 NdisUnchainBufferAtFront(packet, &buffer);
739 page_buf = next_buf;
740 }
742 NdisFreePacket(packet);
743 InterlockedDecrement(&xi->rx_outstanding);
/* wake the shutdown path once all indicated packets have been returned */
744 if (!xi->rx_outstanding && xi->device_state != DEVICE_STATE_ACTIVE)
745 KeSetEvent(&xi->rx_idle_event, IO_NO_INCREMENT, FALSE);
746 //FUNCTION_EXIT();
747 }
748 #else
749 /* called at <= DISPATCH_LEVEL */
750 /* it's okay for return packet to be called while resume_state != RUNNING as the packet will simply be added back to the freelist, the grants will be fixed later */
/* NDIS6 return handler: for every NBL in the chain, and every NET_BUFFER in
 * each NBL, walk the MDL chain in lock-step with the shared_buffer_t chain
 * (head saved in NB_FIRST_PB), returning each header/page buffer to its
 * freelist before freeing the NET_BUFFER and NBL. Signals rx_idle_event
 * when the last outstanding rx packet comes back during shutdown.
 * Called at <= DISPATCH_LEVEL. */
751 VOID
752 XenNet_ReturnNetBufferLists(NDIS_HANDLE adapter_context, PNET_BUFFER_LIST curr_nbl, ULONG return_flags)
753 {
754 struct xennet_info *xi = adapter_context;
755 UNREFERENCED_PARAMETER(return_flags);
757 //FUNCTION_ENTER();
759 //KdPrint((__DRIVER_NAME " page_buf = %p\n", page_buf));
761 XN_ASSERT(xi);
762 while (curr_nbl)
763 {
764 PNET_BUFFER_LIST next_nbl;
765 PNET_BUFFER curr_nb;
767 next_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl);
768 curr_nb = NET_BUFFER_LIST_FIRST_NB(curr_nbl);
769 while (curr_nb)
770 {
771 PNET_BUFFER next_nb;
772 PMDL curr_mdl;
773 shared_buffer_t *page_buf;
775 next_nb = NET_BUFFER_NEXT_NB(curr_nb);
776 curr_mdl = NET_BUFFER_FIRST_MDL(curr_nb);
777 page_buf = NB_FIRST_PB(curr_nb);
778 while (curr_mdl)
779 {
780 shared_buffer_t *next_buf;
781 PMDL next_mdl;
783 XN_ASSERT(page_buf); /* make sure that there is a pb to match this mdl */
784 next_mdl = curr_mdl->Next;
785 next_buf = page_buf->next;
786 if (!page_buf->virtual)
787 {
788 /* this is a hb not a pb because virtual is NULL (virtual is just the memory after the hb */
789 put_hb_on_freelist(xi, (shared_buffer_t *)MmGetMdlVirtualAddress(curr_mdl) - 1);
790 }
791 else
792 {
793 //KdPrint((__DRIVER_NAME " returning page_buf %p with id %d\n", page_buf, page_buf->id));
/* partial MDLs built in MakePacket are freed here; the pb keeps its own mdl */
794 if (curr_mdl != page_buf->mdl)
795 {
796 //KdPrint((__DRIVER_NAME " curr_mdl = %p, page_buf->mdl = %p\n", curr_mdl, page_buf->mdl));
797 IoFreeMdl(curr_mdl);
798 }
799 put_pb_on_freelist(xi, page_buf);
800 }
801 curr_mdl = next_mdl;
802 page_buf = next_buf;
803 }
805 NdisFreeNetBuffer(curr_nb);
806 InterlockedDecrement(&xi->rx_outstanding);
808 curr_nb = next_nb;
809 }
810 NdisFreeNetBufferList(curr_nbl);
811 curr_nbl = next_nbl;
812 }
/* wake the shutdown path once all indicated packets have been returned */
814 if (!xi->rx_outstanding && xi->device_state != DEVICE_STATE_ACTIVE)
815 KeSetEvent(&xi->rx_idle_event, IO_NO_INCREMENT, FALSE);
817 //FUNCTION_EXIT();
818 }
819 #endif
821 /* We limit the number of packets per interrupt so that acks get a chance
822 under high rx load. The DPC is immediately re-scheduled */
824 #define MAXIMUM_PACKETS_PER_INDICATE 32
826 #define MAXIMUM_PACKETS_PER_INTERRUPT 2560 /* this is calculated before large packet split */
827 #define MAXIMUM_DATA_PER_INTERRUPT (MAXIMUM_PACKETS_PER_INTERRUPT * 1500) /* help account for large packets */
829 // Called at DISPATCH_LEVEL
830 BOOLEAN
831 XenNet_RxBufferCheck(struct xennet_info *xi)
832 {
833 RING_IDX cons, prod;
834 ULONG packet_count = 0;
835 ULONG packet_data = 0;
836 ULONG buffer_count = 0;
837 USHORT id;
838 int more_to_do = FALSE;
839 shared_buffer_t *page_buf;
840 #if NTDDI_VERSION < NTDDI_VISTA
841 PNDIS_PACKET packets[MAXIMUM_PACKETS_PER_INDICATE];
842 PNDIS_PACKET first_header_only_packet;
843 PNDIS_PACKET last_header_only_packet;
844 #else
845 #endif
846 //ULONG nbl_count = 0;
847 ULONG interim_packet_data = 0;
848 struct netif_extra_info *ei;
849 rx_context_t rc;
850 packet_info_t *pi = &xi->rxpi[KeGetCurrentProcessorNumber() & 0xff];
851 shared_buffer_t *head_buf = NULL;
852 shared_buffer_t *tail_buf = NULL;
853 shared_buffer_t *last_buf = NULL;
854 BOOLEAN extra_info_flag = FALSE;
855 BOOLEAN more_data_flag = FALSE;
856 BOOLEAN dont_set_event;
857 //FUNCTION_ENTER();
859 #if NTDDI_VERSION < NTDDI_VISTA
860 rc.first_packet = NULL;
861 rc.last_packet = NULL;
862 rc.packet_count = 0;
863 #else
864 rc.first_nbl = NULL;
865 rc.last_nbl = NULL;
866 rc.packet_count = 0;
867 rc.nbl_count = 0;
868 #endif
870 /* get all the buffers off the ring as quickly as possible so the lock is held for a minimum amount of time */
871 KeAcquireSpinLockAtDpcLevel(&xi->rx_lock);
873 if (xi->device_state != DEVICE_STATE_ACTIVE) {
874 /* there is a chance that our Dpc had been queued just before the shutdown... */
875 KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);
876 return FALSE;
877 }
879 if (xi->rx_partial_buf) {
880 head_buf = xi->rx_partial_buf;
881 tail_buf = xi->rx_partial_buf;
882 while (tail_buf->next)
883 tail_buf = tail_buf->next;
884 more_data_flag = xi->rx_partial_more_data_flag;
885 extra_info_flag = xi->rx_partial_extra_info_flag;
886 xi->rx_partial_buf = NULL;
887 }
889 do {
890 prod = xi->rx_ring.sring->rsp_prod;
891 KeMemoryBarrier(); /* Ensure we see responses up to 'prod'. */
893 for (cons = xi->rx_ring.rsp_cons; cons != prod && packet_count < MAXIMUM_PACKETS_PER_INTERRUPT && packet_data < MAXIMUM_DATA_PER_INTERRUPT; cons++) {
894 id = (USHORT)(cons & (NET_RX_RING_SIZE - 1));
895 page_buf = xi->rx_ring_pbs[id];
896 XN_ASSERT(page_buf);
897 xi->rx_ring_pbs[id] = NULL;
898 xi->rx_id_free++;
899 memcpy(&page_buf->rsp, RING_GET_RESPONSE(&xi->rx_ring, cons), max(sizeof(struct netif_rx_response), sizeof(struct netif_extra_info)));
900 if (!extra_info_flag) {
901 if (page_buf->rsp.status <= 0 || page_buf->rsp.offset + page_buf->rsp.status > PAGE_SIZE) {
902 KdPrint((__DRIVER_NAME " Error: rsp offset %d, size %d\n",
903 page_buf->rsp.offset, page_buf->rsp.status));
904 XN_ASSERT(!extra_info_flag);
905 put_pb_on_freelist(xi, page_buf);
906 continue;
907 }
908 }
910 if (!head_buf) {
911 head_buf = page_buf;
912 tail_buf = page_buf;
913 } else {
914 tail_buf->next = page_buf;
915 tail_buf = page_buf;
916 }
917 page_buf->next = NULL;
919 if (extra_info_flag) {
920 ei = (struct netif_extra_info *)&page_buf->rsp;
921 extra_info_flag = ei->flags & XEN_NETIF_EXTRA_FLAG_MORE;
922 } else {
923 more_data_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_more_data);
924 extra_info_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_extra_info);
925 interim_packet_data += page_buf->rsp.status;
926 }
928 if (!extra_info_flag && !more_data_flag) {
929 last_buf = page_buf;
930 packet_count++;
931 packet_data += interim_packet_data;
932 interim_packet_data = 0;
933 }
934 buffer_count++;
935 }
936 xi->rx_ring.rsp_cons = cons;
938 /* Give netback more buffers */
939 XenNet_FillRing(xi);
941 if (packet_count >= MAXIMUM_PACKETS_PER_INTERRUPT || packet_data >= MAXIMUM_DATA_PER_INTERRUPT)
942 break;
944 more_to_do = RING_HAS_UNCONSUMED_RESPONSES(&xi->rx_ring);
945 if (!more_to_do) {
946 xi->rx_ring.sring->rsp_event = xi->rx_ring.rsp_cons + 1;
947 KeMemoryBarrier();
948 more_to_do = RING_HAS_UNCONSUMED_RESPONSES(&xi->rx_ring);
949 }
950 } while (more_to_do);
952 /* anything past last_buf belongs to an incomplete packet... */
953 if (last_buf && last_buf->next)
954 {
955 KdPrint((__DRIVER_NAME " Partial receive\n"));
956 xi->rx_partial_buf = last_buf->next;
957 xi->rx_partial_more_data_flag = more_data_flag;
958 xi->rx_partial_extra_info_flag = extra_info_flag;
959 last_buf->next = NULL;
960 }
962 KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);
964 if (packet_count >= MAXIMUM_PACKETS_PER_INTERRUPT || packet_data >= MAXIMUM_DATA_PER_INTERRUPT)
965 {
966 /* fire again immediately */
967 KdPrint((__DRIVER_NAME " Dpc Duration Exceeded\n"));
968 /* we want the Dpc on the end of the queue. By definition we are already on the right CPU so we know the Dpc queue will be run immediately */
969 // KeSetImportanceDpc(&xi->rxtx_dpc, MediumImportance);
970 KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
971 /* dont set an event in TX path */
972 dont_set_event = TRUE;
973 }
974 else
975 {
976 /* make sure the Dpc queue is run immediately next interrupt */
977 // KeSetImportanceDpc(&xi->rxtx_dpc, HighImportance);
978 /* set an event in TX path */
979 dont_set_event = FALSE;
980 }
982 /* make packets out of the buffers */
983 page_buf = head_buf;
984 extra_info_flag = FALSE;
985 more_data_flag = FALSE;
987 while (page_buf) {
988 shared_buffer_t *next_buf = page_buf->next;
989 PMDL mdl;
991 page_buf->next = NULL;
992 if (extra_info_flag) {
993 //KdPrint((__DRIVER_NAME " processing extra info\n"));
994 ei = (struct netif_extra_info *)&page_buf->rsp;
995 extra_info_flag = ei->flags & XEN_NETIF_EXTRA_FLAG_MORE;
996 switch (ei->type)
997 {
998 case XEN_NETIF_EXTRA_TYPE_GSO:
999 switch (ei->u.gso.type) {
1000 case XEN_NETIF_GSO_TYPE_TCPV4:
1001 pi->mss = ei->u.gso.size;
1002 // TODO - put this assertion somewhere XN_ASSERT(header_len + pi->mss <= PAGE_SIZE); // this limits MTU to PAGE_SIZE - XN_HEADER_LEN
1003 break;
1004 default:
1005 KdPrint((__DRIVER_NAME " Unknown GSO type (%d) detected\n", ei->u.gso.type));
1006 break;
1008 break;
1009 default:
1010 KdPrint((__DRIVER_NAME " Unknown extra info type (%d) detected\n", ei->type));
1011 break;
1013 put_pb_on_freelist(xi, page_buf);
1014 } else {
1015 XN_ASSERT(!page_buf->rsp.offset);
1016 if (!more_data_flag) { // handling the packet's 1st buffer
1017 if (page_buf->rsp.flags & NETRXF_csum_blank)
1018 pi->csum_blank = TRUE;
1019 if (page_buf->rsp.flags & NETRXF_data_validated)
1020 pi->data_validated = TRUE;
1022 mdl = page_buf->mdl;
1023 mdl->ByteCount = page_buf->rsp.status; //NdisAdjustBufferLength(mdl, page_buf->rsp.status);
1024 //KdPrint((__DRIVER_NAME " buffer = %p, pb = %p\n", buffer, page_buf));
1025 if (pi->first_pb) {
1026 XN_ASSERT(pi->curr_pb);
1027 //KdPrint((__DRIVER_NAME " additional buffer\n"));
1028 pi->curr_pb->next = page_buf;
1029 pi->curr_pb = page_buf;
1030 XN_ASSERT(pi->curr_mdl);
1031 pi->curr_mdl->Next = mdl;
1032 pi->curr_mdl = mdl;
1033 } else {
1034 pi->first_pb = page_buf;
1035 pi->curr_pb = page_buf;
1036 pi->first_mdl = mdl;
1037 pi->curr_mdl = mdl;
1039 //pi->mdl_count++;
1040 extra_info_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_extra_info);
1041 more_data_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_more_data);
1042 pi->total_length = pi->total_length + page_buf->rsp.status;
1045 /* Packet done, add it to the list */
1046 if (!more_data_flag && !extra_info_flag) {
1047 pi->curr_pb = pi->first_pb;
1048 pi->curr_mdl = pi->first_mdl;
1049 XenNet_MakePackets(xi, &rc, pi);
1052 page_buf = next_buf;
1054 XN_ASSERT(!more_data_flag && !extra_info_flag);
1056 #if NTDDI_VERSION < NTDDI_VISTA
1057 packet_count = 0;
1058 first_header_only_packet = NULL;
1059 last_header_only_packet = NULL;
1061 while (rc.first_packet) {
1062 PNDIS_PACKET packet;
1063 NDIS_STATUS status;
1065 packet = rc.first_packet;
1066 XN_ASSERT(PACKET_FIRST_PB(packet));
1067 rc.first_packet = PACKET_NEXT_PACKET(packet);
1068 status = NDIS_GET_PACKET_STATUS(packet);
1069 if (status == NDIS_STATUS_RESOURCES) {
1070 if (!first_header_only_packet) {
1071 first_header_only_packet = packet;
1072 } else {
1073 PACKET_NEXT_PACKET(last_header_only_packet) = packet;
1075 last_header_only_packet = packet;
1076 PACKET_NEXT_PACKET(packet) = NULL;
1078 packets[packet_count++] = packet;
1079 /* if we indicate a packet with NDIS_STATUS_RESOURCES then any following packet can't be NDIS_STATUS_SUCCESS */
1080 if (packet_count == MAXIMUM_PACKETS_PER_INDICATE || !rc.first_packet
1081 || (NDIS_GET_PACKET_STATUS(rc.first_packet) == NDIS_STATUS_SUCCESS
1082 && status == NDIS_STATUS_RESOURCES)) {
1083 NdisMIndicateReceivePacket(xi->adapter_handle, packets, packet_count);
1084 packet_count = 0;
1087 /* now return the packets for which we indicated NDIS_STATUS_RESOURCES */
1088 while (first_header_only_packet) {
1089 PNDIS_PACKET packet = first_header_only_packet;
1090 first_header_only_packet = PACKET_NEXT_PACKET(packet);
1091 XenNet_ReturnPacket(xi, packet);
1093 #else
1094 if (rc.first_nbl) {
1095 NdisMIndicateReceiveNetBufferLists(xi->adapter_handle, rc.first_nbl,
1096 NDIS_DEFAULT_PORT_NUMBER, rc.nbl_count,
1097 NDIS_RECEIVE_FLAGS_DISPATCH_LEVEL
1098 //| NDIS_RECEIVE_FLAGS_SINGLE_ETHER_TYPE
1099 | NDIS_RECEIVE_FLAGS_PERFECT_FILTERED);
1101 #endif
1102 //FUNCTION_EXIT();
1103 return dont_set_event;
1106 static VOID
1107 XenNet_BufferFree(xennet_info_t *xi)
1109 shared_buffer_t *sb;
1110 int i;
1112 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1113 if (xi->rx_ring_pbs[i] != NULL) {
1114 put_pb_on_freelist(xi, xi->rx_ring_pbs[i]);
1115 xi->rx_ring_pbs[i] = NULL;
1119 /* because we are shutting down this won't allocate new ones */
1120 while ((sb = get_pb_from_freelist(xi)) != NULL) {
1121 XnEndAccess(xi->handle,
1122 sb->gref, FALSE, (ULONG)'XNRX');
1123 IoFreeMdl(sb->mdl);
1124 ExFreePoolWithTag(sb->virtual, XENNET_POOL_TAG);
1125 ExFreePoolWithTag(sb, XENNET_POOL_TAG);
1127 while ((sb = get_hb_from_freelist(xi)) != NULL) {
1128 IoFreeMdl(sb->mdl);
1129 ExFreePoolWithTag(sb, XENNET_POOL_TAG);
1133 BOOLEAN
1134 XenNet_RxInit(xennet_info_t *xi) {
1135 #if NTDDI_VERSION < NTDDI_VISTA
1136 NDIS_STATUS status;
1137 #else
1138 NET_BUFFER_LIST_POOL_PARAMETERS nbl_pool_parameters;
1139 NET_BUFFER_POOL_PARAMETERS nb_pool_parameters;
1140 #endif
1141 int ret;
1142 int i;
1144 FUNCTION_ENTER();
1146 // this stuff needs to be done once only...
1147 KeInitializeSpinLock(&xi->rx_lock);
1148 KeInitializeEvent(&xi->rx_idle_event, SynchronizationEvent, FALSE);
1149 xi->rxpi = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(packet_info_t) * NdisSystemProcessorCount(), XENNET_POOL_TAG, NormalPoolPriority);
1150 if (!xi->rxpi) {
1151 KdPrint(("ExAllocatePoolWithTagPriority failed\n"));
1152 return FALSE;
1154 NdisZeroMemory(xi->rxpi, sizeof(packet_info_t) * NdisSystemProcessorCount());
1156 ret = stack_new(&xi->rx_pb_stack, NET_RX_RING_SIZE * 4);
1157 if (!ret) {
1158 FUNCTION_MSG("Failed to allocate rx_pb_stack\n");
1159 ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);
1160 return FALSE;
1162 ret = stack_new(&xi->rx_hb_stack, NET_RX_RING_SIZE * 4);
1163 if (!ret) {
1164 FUNCTION_MSG("Failed to allocate rx_hb_stack\n");
1165 stack_delete(xi->rx_pb_stack, NULL, NULL);
1166 ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);
1167 return FALSE;
1170 xi->rx_id_free = NET_RX_RING_SIZE;
1171 xi->rx_outstanding = 0;
1173 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1174 xi->rx_ring_pbs[i] = NULL;
1177 #if NTDDI_VERSION < NTDDI_VISTA
1178 NdisAllocatePacketPool(&status, &xi->rx_packet_pool, NET_RX_RING_SIZE * 4, PROTOCOL_RESERVED_SIZE_IN_PACKET);
1179 if (status != NDIS_STATUS_SUCCESS) {
1180 KdPrint(("NdisAllocatePacketPool failed with 0x%x\n", status));
1181 return FALSE;
1183 #else
1184 nbl_pool_parameters.Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
1185 nbl_pool_parameters.Header.Revision = NET_BUFFER_LIST_POOL_PARAMETERS_REVISION_1;
1186 nbl_pool_parameters.Header.Size = NDIS_SIZEOF_NET_BUFFER_LIST_POOL_PARAMETERS_REVISION_1;
1187 nbl_pool_parameters.ProtocolId = NDIS_PROTOCOL_ID_DEFAULT;
1188 nbl_pool_parameters.fAllocateNetBuffer = FALSE;
1189 nbl_pool_parameters.ContextSize = 0;
1190 nbl_pool_parameters.PoolTag = XENNET_POOL_TAG;
1191 nbl_pool_parameters.DataSize = 0; /* NET_BUFFERS are always allocated separately */
1193 xi->rx_nbl_pool = NdisAllocateNetBufferListPool(xi->adapter_handle, &nbl_pool_parameters);
1194 if (!xi->rx_nbl_pool) {
1195 KdPrint(("NdisAllocateNetBufferListPool failed\n"));
1196 return FALSE;
1199 nb_pool_parameters.Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
1200 nb_pool_parameters.Header.Revision = NET_BUFFER_POOL_PARAMETERS_REVISION_1;
1201 nb_pool_parameters.Header.Size = NDIS_SIZEOF_NET_BUFFER_POOL_PARAMETERS_REVISION_1;
1202 nb_pool_parameters.PoolTag = XENNET_POOL_TAG;
1203 nb_pool_parameters.DataSize = 0; /* the buffers come from the ring */
1204 xi->rx_packet_pool = NdisAllocateNetBufferPool(xi->adapter_handle, &nb_pool_parameters);
1205 if (!xi->rx_packet_pool) {
1206 KdPrint(("NdisAllocateNetBufferPool (rx_packet_pool) failed\n"));
1207 return FALSE;
1209 #endif
1210 XenNet_FillRing(xi);
1212 FUNCTION_EXIT();
1214 return TRUE;
1217 VOID
1218 XenNet_RxShutdown(xennet_info_t *xi) {
1219 KIRQL old_irql;
1220 UNREFERENCED_PARAMETER(xi);
1222 FUNCTION_ENTER();
1224 KeAcquireSpinLock(&xi->rx_lock, &old_irql);
1225 while (xi->rx_outstanding) {
1226 FUNCTION_MSG("Waiting for %d packets to be returned\n", xi->rx_outstanding);
1227 KeReleaseSpinLock(&xi->rx_lock, old_irql);
1228 KeWaitForSingleObject(&xi->rx_idle_event, Executive, KernelMode, FALSE, NULL);
1229 KeAcquireSpinLock(&xi->rx_lock, &old_irql);
1231 KeReleaseSpinLock(&xi->rx_lock, old_irql);
1233 XenNet_BufferFree(xi);
1235 stack_delete(xi->rx_pb_stack, NULL, NULL);
1236 stack_delete(xi->rx_hb_stack, NULL, NULL);
1239 ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);
1241 #if NTDDI_VERSION < NTDDI_VISTA
1242 NdisFreePacketPool(xi->rx_packet_pool);
1243 #else
1244 NdisFreeNetBufferPool(xi->rx_packet_pool);
1245 NdisFreeNetBufferListPool(xi->rx_nbl_pool);
1246 #endif
1248 FUNCTION_EXIT();
1249 return;