win-pvdrivers

xennet/xennet_rx.c @ 1099:27bd2a5a4704

License change from GPL to BSD
author James Harper <james.harper@bendigoit.com.au>
date Thu Mar 13 13:38:31 2014 +1100
/*
PV Drivers for Windows Xen HVM Domains

Copyright (c) 2014, James Harper
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of James Harper nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL JAMES HARPER BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "xennet.h"

static __inline shared_buffer_t *
get_pb_from_freelist(struct xennet_info *xi)
{
  shared_buffer_t *pb;
  PVOID ptr_ref;

  if (stack_pop(xi->rx_pb_stack, &ptr_ref))
  {
    pb = ptr_ref;
    pb->ref_count = 1;
    InterlockedDecrement(&xi->rx_pb_free);
    return pb;
  }

  /* don't allocate a new one if we are shutting down */
  if (xi->device_state != DEVICE_STATE_INITIALISING && xi->device_state != DEVICE_STATE_ACTIVE)
    return NULL;

  pb = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(shared_buffer_t), XENNET_POOL_TAG, LowPoolPriority);
  if (!pb)
    return NULL;
  pb->virtual = ExAllocatePoolWithTagPriority(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG, LowPoolPriority);
  if (!pb->virtual)
  {
    ExFreePoolWithTag(pb, XENNET_POOL_TAG);
    return NULL;
  }
  pb->mdl = IoAllocateMdl(pb->virtual, PAGE_SIZE, FALSE, FALSE, NULL);
  if (!pb->mdl)
  {
    ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
    ExFreePoolWithTag(pb, XENNET_POOL_TAG);
    return NULL;
  }
  pb->gref = (grant_ref_t)XnGrantAccess(xi->handle,
    (ULONG)(MmGetPhysicalAddress(pb->virtual).QuadPart >> PAGE_SHIFT), FALSE, INVALID_GRANT_REF, (ULONG)'XNRX');
  if (pb->gref == INVALID_GRANT_REF)
  {
    IoFreeMdl(pb->mdl);
    ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
    ExFreePoolWithTag(pb, XENNET_POOL_TAG);
    return NULL;
  }
  MmBuildMdlForNonPagedPool(pb->mdl);
  pb->ref_count = 1;
  return pb;
}
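
/*
 * Note on the pb freelist: a page buffer keeps its page granted to the
 * backend while it sits on the freelist, so popping one off the stack skips
 * both the pool allocations and the XnGrantAccess call. A minimal sketch of
 * the intended usage (hypothetical caller, for illustration only):
 *
 *   shared_buffer_t *pb = get_pb_from_freelist(xi);  // ref_count == 1
 *   if (pb) {
 *     req->gref = pb->gref;            // hand the grant to netback
 *     ...
 *     put_pb_on_freelist(xi, pb);      // drops the reference taken above
 *   }
 */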
static __inline VOID
ref_pb(struct xennet_info *xi, shared_buffer_t *pb)
{
  UNREFERENCED_PARAMETER(xi);
  InterlockedIncrement(&pb->ref_count);
}

static __inline VOID
put_pb_on_freelist(struct xennet_info *xi, shared_buffer_t *pb)
{
  if (InterlockedDecrement(&pb->ref_count) == 0)
  {
    //NdisAdjustBufferLength(pb->buffer, PAGE_SIZE);
    //NDIS_BUFFER_LINKAGE(pb->buffer) = NULL;
    if (xi->rx_pb_free > RX_MAX_PB_FREELIST)
    {
      XnEndAccess(xi->handle, pb->gref, FALSE, (ULONG)'XNRX');
      IoFreeMdl(pb->mdl);
      ExFreePoolWithTag(pb->virtual, XENNET_POOL_TAG);
      ExFreePoolWithTag(pb, XENNET_POOL_TAG);
      return;
    }
    pb->mdl->ByteCount = PAGE_SIZE;
    pb->mdl->Next = NULL;
    pb->next = NULL;
    stack_push(xi->rx_pb_stack, pb);
    InterlockedIncrement(&xi->rx_pb_free);
  }
}
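
/*
 * put_pb_on_freelist() is really an unref: every get_pb_from_freelist() and
 * every ref_pb() must eventually be balanced by one call here. Only the call
 * that takes ref_count to zero recycles (or, past RX_MAX_PB_FREELIST, frees)
 * the buffer, which is why XenNet_MakePackets can "free" its pb chain while
 * indicated packets still hold references to the same pages.
 */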
static __inline shared_buffer_t *
get_hb_from_freelist(struct xennet_info *xi)
{
  shared_buffer_t *hb;
  PVOID ptr_ref;

  if (stack_pop(xi->rx_hb_stack, &ptr_ref))
  {
    hb = ptr_ref;
    InterlockedDecrement(&xi->rx_hb_free);
    return hb;
  }

  /* don't allocate a new one if we are shutting down */
  if (xi->device_state != DEVICE_STATE_INITIALISING && xi->device_state != DEVICE_STATE_ACTIVE)
    return NULL;

  hb = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(shared_buffer_t) + MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH, XENNET_POOL_TAG, LowPoolPriority);
  if (!hb)
    return NULL;
  NdisZeroMemory(hb, sizeof(shared_buffer_t));
  hb->mdl = IoAllocateMdl(hb + 1, MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH, FALSE, FALSE, NULL);
  if (!hb->mdl) {
    ExFreePoolWithTag(hb, XENNET_POOL_TAG);
    return NULL;
  }
  MmBuildMdlForNonPagedPool(hb->mdl);
  return hb;
}
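
/*
 * Header buffers are a single allocation: the shared_buffer_t bookkeeping
 * struct, immediately followed by the header data area that the MDL maps
 * (hence IoAllocateMdl(hb + 1, ...) above, with hb->virtual left NULL by the
 * NdisZeroMemory). The return paths below rely on both facts: a NULL virtual
 * marks an hb, and (shared_buffer_t *)MmGetMdlVirtualAddress(mdl) - 1
 * recovers the hb from its MDL.
 */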
static __inline VOID
put_hb_on_freelist(struct xennet_info *xi, shared_buffer_t *hb)
{
  XN_ASSERT(xi);
  hb->mdl->ByteCount = sizeof(shared_buffer_t) + MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH;
  hb->mdl->Next = NULL;
  hb->next = NULL;
  stack_push(xi->rx_hb_stack, hb);
  InterlockedIncrement(&xi->rx_hb_free);
}
// Called at DISPATCH_LEVEL with rx lock held
static VOID
XenNet_FillRing(struct xennet_info *xi)
{
  unsigned short id;
  shared_buffer_t *page_buf;
  ULONG i, notify;
  ULONG batch_target;
  RING_IDX req_prod = xi->rx_ring.req_prod_pvt;
  netif_rx_request_t *req;

  //FUNCTION_ENTER();

  if (xi->device_state != DEVICE_STATE_ACTIVE)
    return;

  batch_target = xi->rx_target - (req_prod - xi->rx_ring.rsp_cons);

  if (batch_target < (xi->rx_target >> 2)) {
    //FUNCTION_EXIT();
    return; /* only refill if we are less than 3/4 full already */
  }

  for (i = 0; i < batch_target; i++) {
    page_buf = get_pb_from_freelist(xi);
    if (!page_buf) {
      FUNCTION_MSG("Added %d out of %d buffers to rx ring (no free pages)\n", i, batch_target);
      break;
    }
    xi->rx_id_free--;

    /* Give to netback */
    id = (USHORT)((req_prod + i) & (NET_RX_RING_SIZE - 1));
    XN_ASSERT(xi->rx_ring_pbs[id] == NULL);
    xi->rx_ring_pbs[id] = page_buf;
    req = RING_GET_REQUEST(&xi->rx_ring, req_prod + i);
    req->id = id;
    req->gref = page_buf->gref;
    XN_ASSERT(req->gref != INVALID_GRANT_REF);
  }
  KeMemoryBarrier();
  xi->rx_ring.req_prod_pvt = req_prod + i;
  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->rx_ring, notify);
  if (notify) {
    XnNotify(xi->handle, xi->event_channel);
  }

  //FUNCTION_EXIT();
  return;
}
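
/*
 * Worked example of the refill test above, assuming a 256-slot ring and
 * rx_target == 256: with 200 requests still outstanding (req_prod - rsp_cons),
 * batch_target is 56, which is below rx_target >> 2 == 64, so the ring is
 * still more than 3/4 full and the refill is skipped; once the backlog drops
 * to 192 or fewer, batch_target reaches 64 and the loop tops the ring back up.
 */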
#if NTDDI_VERSION < NTDDI_VISTA
typedef struct {
  PNDIS_PACKET first_packet;
  PNDIS_PACKET last_packet;
  ULONG packet_count;
} rx_context_t;
#else
typedef struct {
  PNET_BUFFER_LIST first_nbl;
  PNET_BUFFER_LIST last_nbl;
  ULONG packet_count;
  ULONG nbl_count;
} rx_context_t;
#endif

#if NTDDI_VERSION < NTDDI_VISTA
/*
NDIS5 appears to insist that the checksum on received packets is correct, and won't
believe us when we lie about it, which happens when the packet is generated on the
same bridge in Dom0. Doh!
This is only for TCP and UDP packets. IP checksums appear to be correct anyway.
*/

static BOOLEAN
XenNet_SumPacketData(
  packet_info_t *pi,
  PNDIS_PACKET packet,
  BOOLEAN set_csum) {
  USHORT i;
  PUCHAR buffer;
  PMDL mdl;
  UINT total_length;
  UINT data_length;
  UINT buffer_length;
  USHORT buffer_offset;
  ULONG csum;
  PUSHORT csum_ptr;
  USHORT remaining;
  USHORT ip4_length;
  BOOLEAN csum_span = TRUE; /* when the USHORT to be checksummed spans a buffer */

  NdisGetFirstBufferFromPacketSafe(packet, &mdl, &buffer, &buffer_length, &total_length, NormalPagePriority);
  if (!buffer) {
    FUNCTION_MSG("NdisGetFirstBufferFromPacketSafe failed, buffer == NULL\n");
    return FALSE;
  }
  XN_ASSERT(mdl);

  ip4_length = GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 2]);
  data_length = ip4_length + XN_HDR_SIZE;

  if ((USHORT)data_length > total_length) {
    FUNCTION_MSG("Size Mismatch %d (ip4_length + XN_HDR_SIZE) != %d (total_length)\n", ip4_length + XN_HDR_SIZE, total_length);
    return FALSE;
  }

  switch (pi->ip_proto) {
  case 6:
    XN_ASSERT(buffer_length >= (USHORT)(XN_HDR_SIZE + pi->ip4_header_length + 17));
    csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + pi->ip4_header_length + 16];
    break;
  case 17:
    XN_ASSERT(buffer_length >= (USHORT)(XN_HDR_SIZE + pi->ip4_header_length + 7));
    csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + pi->ip4_header_length + 6];
    break;
  default:
    FUNCTION_MSG("Don't know how to calc sum for IP Proto %d\n", pi->ip_proto);
    //FUNCTION_EXIT();
    return FALSE; // should never happen
  }

  if (set_csum)
    *csum_ptr = 0;

  csum = 0;
  csum += GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 12]) + GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 14]); // src
  csum += GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 16]) + GET_NET_PUSHORT(&buffer[XN_HDR_SIZE + 18]); // dst
  csum += ((USHORT)buffer[XN_HDR_SIZE + 9]);

  remaining = ip4_length - pi->ip4_header_length;

  csum += remaining;

  csum_span = FALSE;
  buffer_offset = i = XN_HDR_SIZE + pi->ip4_header_length;
  while (i < data_length) {
    /* don't include the checksum field itself in the calculation */
    if ((pi->ip_proto == 6 && i == XN_HDR_SIZE + pi->ip4_header_length + 16) || (pi->ip_proto == 17 && i == XN_HDR_SIZE + pi->ip4_header_length + 6)) {
      /* we know that this always happens in the header buffer so we are guaranteed the full two bytes */
      i += 2;
      buffer_offset += 2;
      continue;
    }
    if (csum_span) {
      /* the other half of the next bit */
      XN_ASSERT(buffer_offset == 0);
      csum += (USHORT)buffer[buffer_offset];
      csum_span = FALSE;
      i += 1;
      buffer_offset += 1;
    } else if (buffer_offset == buffer_length - 1) {
      /* deal with a buffer ending on an odd byte boundary */
      csum += (USHORT)buffer[buffer_offset] << 8;
      csum_span = TRUE;
      i += 1;
      buffer_offset += 1;
    } else {
      csum += GET_NET_PUSHORT(&buffer[buffer_offset]);
      i += 2;
      buffer_offset += 2;
    }
    if (buffer_offset == buffer_length && i < total_length) {
      NdisGetNextBuffer(mdl, &mdl);
      if (mdl == NULL) {
        FUNCTION_MSG(__DRIVER_NAME " Ran out of buffers\n");
        return FALSE; // should never happen
      }
      NdisQueryBufferSafe(mdl, &buffer, &buffer_length, NormalPagePriority);
      XN_ASSERT(buffer_length);
      buffer_offset = 0;
    }
  }

  while (csum & 0xFFFF0000)
    csum = (csum & 0xFFFF) + (csum >> 16);

  if (set_csum) {
    *csum_ptr = (USHORT)~GET_NET_USHORT((USHORT)csum);
  } else {
    return (BOOLEAN)(*csum_ptr == (USHORT)~GET_NET_USHORT((USHORT)csum));
  }
  return TRUE;
}
#endif
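
/*
 * The fold loop above is the usual 16-bit ones'-complement reduction: any
 * carry past bit 15 is added back in until the sum fits in 16 bits. For
 * example, a running sum of 0x2A345 folds to 0xA345 + 0x2 = 0xA347, and the
 * value stored or compared is its complement, ~0xA347 = 0x5CB8 (byte-swapped
 * to network order by GET_NET_USHORT).
 */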
static BOOLEAN
XenNet_MakePacket(struct xennet_info *xi, rx_context_t *rc, packet_info_t *pi) {
#if NTDDI_VERSION < NTDDI_VISTA
  NDIS_STATUS status;
  PNDIS_PACKET packet;
#else
  PNET_BUFFER_LIST nbl;
  PNET_BUFFER packet;
#endif
  PMDL mdl_head, mdl_tail, curr_mdl;
  PUCHAR header_va;
  ULONG out_remaining;
  ULONG header_extra;
  shared_buffer_t *header_buf;
  ULONG outstanding;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  //UINT packet_length;
#else
  NDIS_TCP_IP_CHECKSUM_NET_BUFFER_LIST_INFO csum_info;
#endif
  //FUNCTION_ENTER();

#if NTDDI_VERSION < NTDDI_VISTA
  NdisAllocatePacket(&status, &packet, xi->rx_packet_pool);
  if (status != NDIS_STATUS_SUCCESS) {
    FUNCTION_MSG("No free packets\n");
    return FALSE;
  }

  NdisZeroMemory(packet->MiniportReservedEx, sizeof(packet->MiniportReservedEx));
  NDIS_SET_PACKET_HEADER_SIZE(packet, XN_HDR_SIZE);
#else
  nbl = NdisAllocateNetBufferList(xi->rx_nbl_pool, 0, 0);
  if (!nbl) {
    /* buffers will be freed in MakePackets */
    FUNCTION_MSG("No free nbls\n");
    //FUNCTION_EXIT();
    return FALSE;
  }

  packet = NdisAllocateNetBuffer(xi->rx_packet_pool, NULL, 0, 0);
  if (!packet) {
    FUNCTION_MSG("No free packets\n");
    NdisFreeNetBufferList(nbl);
    //FUNCTION_EXIT();
    return FALSE;
  }
#endif

  if ((!pi->first_mdl->Next || (xi->config_rx_coalesce && pi->total_length <= PAGE_SIZE)) && !pi->split_required) {
    /* a single buffer <= MTU */
    header_buf = NULL;
    /* get all the packet into the header */
    XenNet_BuildHeader(pi, pi->first_mdl_virtual, PAGE_SIZE);
#if NTDDI_VERSION < NTDDI_VISTA
    NdisChainBufferAtBack(packet, pi->first_mdl);
    PACKET_FIRST_PB(packet) = pi->first_pb;
#else
    NET_BUFFER_FIRST_MDL(packet) = pi->first_mdl;
    NET_BUFFER_CURRENT_MDL(packet) = pi->first_mdl;
    NET_BUFFER_CURRENT_MDL_OFFSET(packet) = 0;
    NET_BUFFER_DATA_OFFSET(packet) = 0;
    NET_BUFFER_DATA_LENGTH(packet) = pi->total_length;
    NB_FIRST_PB(packet) = pi->first_pb;
#endif
    ref_pb(xi, pi->first_pb);
  } else {
    XN_ASSERT(ndis_os_minor_version >= 1);
    header_buf = get_hb_from_freelist(xi);
    if (!header_buf) {
      FUNCTION_MSG("No free header buffers\n");
#if NTDDI_VERSION < NTDDI_VISTA
      NdisUnchainBufferAtFront(packet, &curr_mdl);
      NdisFreePacket(packet);
#else
      NdisFreeNetBufferList(nbl);
      NdisFreeNetBuffer(packet);
#endif
      return FALSE;
    }
    header_va = (PUCHAR)(header_buf + 1);
    NdisMoveMemory(header_va, pi->header, pi->header_length);
    //if (pi->ip_proto == 50) {
    //  FUNCTION_MSG("header_length = %d, current_lookahead = %d\n", pi->header_length, xi->current_lookahead);
    //  FUNCTION_MSG("ip4_header_length = %d\n", pi->ip4_header_length);
    //  FUNCTION_MSG("tcp_header_length = %d\n", pi->tcp_header_length);
    //}
    /* make sure only the header is in the first buffer (or the entire packet, but that is done in the above case) */
    XenNet_BuildHeader(pi, header_va, MAX_ETH_HEADER_LENGTH + pi->ip4_header_length + pi->tcp_header_length);
    header_extra = pi->header_length - (MAX_ETH_HEADER_LENGTH + pi->ip4_header_length + pi->tcp_header_length);
    XN_ASSERT(pi->header_length <= MAX_ETH_HEADER_LENGTH + MAX_LOOKAHEAD_LENGTH);
    header_buf->mdl->ByteCount = pi->header_length;
    mdl_head = mdl_tail = curr_mdl = header_buf->mdl;
#if NTDDI_VERSION < NTDDI_VISTA
    PACKET_FIRST_PB(packet) = header_buf;
    header_buf->next = pi->curr_pb;
    NdisChainBufferAtBack(packet, mdl_head);
#else
    NB_FIRST_PB(packet) = header_buf;
    header_buf->next = pi->curr_pb;
    NET_BUFFER_FIRST_MDL(packet) = mdl_head;
    NET_BUFFER_CURRENT_MDL(packet) = mdl_head;
    NET_BUFFER_CURRENT_MDL_OFFSET(packet) = 0;
    NET_BUFFER_DATA_OFFSET(packet) = 0;
    NET_BUFFER_DATA_LENGTH(packet) = pi->header_length;
#endif

    if (pi->split_required) {
      /* must be ip4 */
      ULONG tcp_length;
      USHORT new_ip4_length;
      tcp_length = (USHORT)min(pi->mss, pi->tcp_remaining);
      new_ip4_length = (USHORT)(pi->ip4_header_length + pi->tcp_header_length + tcp_length);
      SET_NET_USHORT(&header_va[XN_HDR_SIZE + 2], new_ip4_length);
      SET_NET_ULONG(&header_va[XN_HDR_SIZE + pi->ip4_header_length + 4], pi->tcp_seq);
      pi->tcp_seq += tcp_length;
      pi->tcp_remaining = (USHORT)(pi->tcp_remaining - tcp_length);
      /* part of the packet is already present in the header buffer for lookahead */
      out_remaining = tcp_length - header_extra;
      XN_ASSERT((LONG)out_remaining >= 0);
    } else {
      out_remaining = pi->total_length - pi->header_length;
      XN_ASSERT((LONG)out_remaining >= 0);
    }

    while (out_remaining != 0) {
      //ULONG in_buffer_offset;
      ULONG in_buffer_length;
      ULONG out_length;

      //if (pi->ip_proto == 50) {
      //  FUNCTION_MSG("in loop - out_remaining = %d, curr_buffer = %p, curr_pb = %p\n", out_remaining, pi->curr_mdl, pi->curr_pb);
      //}
      if (!pi->curr_mdl || !pi->curr_pb) {
        FUNCTION_MSG("out of buffers for packet\n");
        //KdPrint((__DRIVER_NAME " out_remaining = %d, curr_buffer = %p, curr_pb = %p\n", out_remaining, pi->curr_mdl, pi->curr_pb));
        // TODO: free some stuff or we'll leak
        /* unchain buffers then free packet */
        //FUNCTION_EXIT();
        return FALSE;
      }

      in_buffer_length = MmGetMdlByteCount(pi->curr_mdl);
      out_length = min(out_remaining, in_buffer_length - pi->curr_mdl_offset);
      curr_mdl = IoAllocateMdl((PUCHAR)MmGetMdlVirtualAddress(pi->curr_mdl) + pi->curr_mdl_offset, out_length, FALSE, FALSE, NULL);
      XN_ASSERT(curr_mdl);
      IoBuildPartialMdl(pi->curr_mdl, curr_mdl, (PUCHAR)MmGetMdlVirtualAddress(pi->curr_mdl) + pi->curr_mdl_offset, out_length);
      mdl_tail->Next = curr_mdl;
      mdl_tail = curr_mdl;
      curr_mdl->Next = NULL; /* I think this might be redundant */
#if NTDDI_VERSION < NTDDI_VISTA
#else
      NET_BUFFER_DATA_LENGTH(packet) += out_length;
#endif
      ref_pb(xi, pi->curr_pb);
      pi->curr_mdl_offset = (USHORT)(pi->curr_mdl_offset + out_length);
      if (pi->curr_mdl_offset == in_buffer_length) {
        pi->curr_mdl = pi->curr_mdl->Next;
        pi->curr_pb = pi->curr_pb->next;
        pi->curr_mdl_offset = 0;
      }
      out_remaining -= out_length;
    }
#if NTDDI_VERSION < NTDDI_VISTA
    if (pi->split_required) {
      // TODO: only if IP checksum is disabled...
      XenNet_SumIpHeader(header_va, pi->ip4_header_length);
    }
#endif
    if (header_extra > 0)
      pi->header_length -= header_extra;
  }

  rc->packet_count++;
#if NTDDI_VERSION < NTDDI_VISTA
#else
  NET_BUFFER_LIST_FIRST_NB(nbl) = packet;
#endif

  if (pi->parse_result == PARSE_OK) {
#if NTDDI_VERSION < NTDDI_VISTA
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    csum_info->Value = 0;
    if (pi->csum_blank || pi->data_validated || pi->split_required) {
      BOOLEAN checksum_offload = FALSE;
      /* we know this is IPv4, and we know Linux always validates the IPv4 checksum for us */
      if (xi->setting_csum.V4Receive.IpChecksum) {
        if (!pi->ip_has_options || xi->setting_csum.V4Receive.IpOptionsSupported) {
          if (XenNet_CheckIpHeaderSum(pi->header, pi->ip4_header_length))
            csum_info->Receive.NdisPacketIpChecksumSucceeded = TRUE;
          else
            csum_info->Receive.NdisPacketIpChecksumFailed = TRUE;
        }
      }
      if (xi->setting_csum.V4Receive.TcpChecksum && pi->ip_proto == 6) {
        if (!pi->tcp_has_options || xi->setting_csum.V4Receive.TcpOptionsSupported) {
          csum_info->Receive.NdisPacketTcpChecksumSucceeded = TRUE;
          checksum_offload = TRUE;
        }
      } else if (xi->setting_csum.V4Receive.UdpChecksum && pi->ip_proto == 17) {
        csum_info->Receive.NdisPacketUdpChecksumSucceeded = TRUE;
        checksum_offload = TRUE;
      }
      if (pi->csum_blank && (!xi->config_csum_rx_dont_fix || !checksum_offload)) {
        XenNet_SumPacketData(pi, packet, TRUE);
      }
    } else if (xi->config_csum_rx_check && pi->ip_version == 4) {
      if (xi->setting_csum.V4Receive.IpChecksum) {
        if (!pi->ip_has_options || xi->setting_csum.V4Receive.IpOptionsSupported) {
          if (XenNet_CheckIpHeaderSum(pi->header, pi->ip4_header_length))
            csum_info->Receive.NdisPacketIpChecksumSucceeded = TRUE;
          else
            csum_info->Receive.NdisPacketIpChecksumFailed = TRUE;
        }
      }
      if (xi->setting_csum.V4Receive.TcpChecksum && pi->ip_proto == 6) {
        if (!pi->tcp_has_options || xi->setting_csum.V4Receive.TcpOptionsSupported) {
          if (XenNet_SumPacketData(pi, packet, FALSE)) {
            csum_info->Receive.NdisPacketTcpChecksumSucceeded = TRUE;
          } else {
            csum_info->Receive.NdisPacketTcpChecksumFailed = TRUE;
          }
        }
      } else if (xi->setting_csum.V4Receive.UdpChecksum && pi->ip_proto == 17) {
        if (XenNet_SumPacketData(pi, packet, FALSE)) {
          csum_info->Receive.NdisPacketUdpChecksumSucceeded = TRUE;
        } else {
          csum_info->Receive.NdisPacketUdpChecksumFailed = TRUE;
        }
      }
    }
#else
    csum_info.Value = 0;
    if (pi->csum_blank || pi->data_validated || pi->mss) {
      if (pi->ip_proto == 6) {
        csum_info.Receive.IpChecksumSucceeded = TRUE;
        csum_info.Receive.TcpChecksumSucceeded = TRUE;
      } else if (pi->ip_proto == 17) {
        csum_info.Receive.IpChecksumSucceeded = TRUE;
        csum_info.Receive.UdpChecksumSucceeded = TRUE;
      }
    }
    NET_BUFFER_LIST_INFO(nbl, TcpIpChecksumNetBufferListInfo) = csum_info.Value;
#endif
  }

#if NTDDI_VERSION < NTDDI_VISTA
  if (!rc->first_packet) {
    rc->first_packet = packet;
  } else {
    PACKET_NEXT_PACKET(rc->last_packet) = packet;
  }
  rc->last_packet = packet;
  rc->packet_count++;
#else
  if (!rc->first_nbl) {
    rc->first_nbl = nbl;
  } else {
    NET_BUFFER_LIST_NEXT_NBL(rc->last_nbl) = nbl;
  }
  rc->last_nbl = nbl;
  NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL;
  rc->nbl_count++;
  if (pi->is_multicast) {
    /* multicast */
    xi->stats.ifHCInMulticastPkts++;
    xi->stats.ifHCInMulticastOctets += NET_BUFFER_DATA_LENGTH(packet);
  } else if (pi->is_broadcast) {
    /* broadcast */
    xi->stats.ifHCInBroadcastPkts++;
    xi->stats.ifHCInBroadcastOctets += NET_BUFFER_DATA_LENGTH(packet);
  } else {
    /* unicast */
    xi->stats.ifHCInUcastPkts++;
    xi->stats.ifHCInUcastOctets += NET_BUFFER_DATA_LENGTH(packet);
  }
#endif

  outstanding = InterlockedIncrement(&xi->rx_outstanding);
#if NTDDI_VERSION < NTDDI_VISTA
  if (outstanding > RX_PACKET_HIGH_WATER_MARK || !xi->rx_pb_free) {
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_RESOURCES);
  } else {
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
  }
#if 0
  /* windows gets lazy about ack packets and holds on to them forever under high load situations. we don't like this */
  NdisQueryPacketLength(packet, &packet_length);
  if (pi->parse_result != PARSE_OK || (pi->ip_proto == 6 && packet_length <= NDIS_STATUS_RESOURCES_MAX_LENGTH))
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_RESOURCES);
  else
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
#endif
#endif

  //FUNCTION_EXIT();
  return TRUE;
}
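
/*
 * Note on the MDL chaining above: the data MDLs hung off the header MDL are
 * partial MDLs (IoAllocateMdl + IoBuildPartialMdl) describing sub-ranges of
 * the pb pages, so each one is paired with a ref_pb() on the page it maps.
 * On return, any MDL that is not the pb's own mdl is recognised as one of
 * these partials and freed with IoFreeMdl/NdisFreeBuffer before the pb's
 * reference is dropped.
 */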
static VOID
XenNet_MakePackets(struct xennet_info *xi, rx_context_t *rc, packet_info_t *pi)
{
  UCHAR tcp_flags;
  shared_buffer_t *page_buf;

  XenNet_ParsePacketHeader(pi, NULL, XN_HDR_SIZE + xi->current_lookahead);

  if (!XenNet_FilterAcceptPacket(xi, pi)) {
    goto done;
  }

  if (pi->split_required) {
#if NTDDI_VERSION < NTDDI_VISTA
    /* need to split to mss for NDIS5 */
#else
    switch (xi->current_gso_rx_split_type) {
    case RX_LSO_SPLIT_HALF:
      pi->mss = max((pi->tcp_length + 1) / 2, pi->mss);
      break;
    case RX_LSO_SPLIT_NONE:
      pi->mss = 65535;
      break;
    }
#endif
  }

  switch (pi->ip_proto) {
  case 6:  // TCP
    if (pi->split_required)
      break;
    /* fall through */
  case 17:  // UDP
    if (!XenNet_MakePacket(xi, rc, pi)) {
      FUNCTION_MSG("Failed to make packet\n");
#if NTDDI_VERSION < NTDDI_VISTA
      xi->stat_rx_no_buffer++;
#else
      xi->stats.ifInDiscards++;
#endif
      goto done;
    }
    goto done;
  default:
    if (!XenNet_MakePacket(xi, rc, pi)) {
      FUNCTION_MSG("Failed to make packet\n");
#if NTDDI_VERSION < NTDDI_VISTA
      xi->stat_rx_no_buffer++;
#else
      xi->stats.ifInDiscards++;
#endif
      goto done;
    }
    goto done;
  }

  /* this is the split_required code */
  pi->tcp_remaining = pi->tcp_length;

  /* we can make certain assumptions here as the following code is only for tcp4 */
  tcp_flags = pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13];
  /* clear all tcp flags except ack, except for the last packet */
  pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] &= 0x10;
  while (pi->tcp_remaining) {
    if (pi->tcp_remaining <= pi->mss) {
      /* restore tcp flags for the last packet */
      pi->header[XN_HDR_SIZE + pi->ip4_header_length + 13] = tcp_flags;
    }
    if (!XenNet_MakePacket(xi, rc, pi)) {
      FUNCTION_MSG("Failed to make packet\n");
#if NTDDI_VERSION < NTDDI_VISTA
      xi->stat_rx_no_buffer++;
#else
      xi->stats.ifInDiscards++;
#endif
      break; /* we are out of memory - just drop the packets */
    }
  }
done:
  page_buf = pi->first_pb;
  while (page_buf) {
    shared_buffer_t *next_pb = page_buf->next;
    put_pb_on_freelist(xi, page_buf); /* this doesn't actually free the page_buf if there are outstanding references */
    page_buf = next_pb;
  }
  XenNet_ClearPacketInfo(pi);
  //FUNCTION_EXIT();
  return;
}
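
/*
 * Worked example of the flag handling in the split loop: byte 13 of the TCP
 * header holds the flags, and 0x10 is ACK. For a large receive arriving with
 * PSH|ACK (0x18), every resegmented packet except the last is sent with just
 * ACK (0x18 & 0x10 == 0x10), and the saved value 0x18 is restored once
 * tcp_remaining fits in a single mss, so PSH is only seen on the final
 * segment, which mirrors what a sender performing TSO would put on the wire.
 */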
#if NTDDI_VERSION < NTDDI_VISTA
/* called at DISPATCH_LEVEL */
/* it's okay for return packet to be called while resume_state != RUNNING as the packet will simply be added back to the freelist, the grants will be fixed later */
VOID
XenNet_ReturnPacket(NDIS_HANDLE adapter_context, PNDIS_PACKET packet) {
  struct xennet_info *xi = adapter_context;
  PNDIS_BUFFER buffer;
  shared_buffer_t *page_buf = PACKET_FIRST_PB(packet);

  //FUNCTION_ENTER();
  NdisUnchainBufferAtFront(packet, &buffer);

  while (buffer) {
    shared_buffer_t *next_buf;
    XN_ASSERT(page_buf);
    next_buf = page_buf->next;
    if (!page_buf->virtual) {
      /* this is a hb not a pb because virtual is NULL (virtual is just the memory after the hb) */
      put_hb_on_freelist(xi, (shared_buffer_t *)MmGetMdlVirtualAddress(buffer) - 1);
    } else {
      if (buffer != page_buf->mdl)
        NdisFreeBuffer(buffer);
      put_pb_on_freelist(xi, page_buf);
    }
    NdisUnchainBufferAtFront(packet, &buffer);
    page_buf = next_buf;
  }

  NdisFreePacket(packet);
  InterlockedDecrement(&xi->rx_outstanding);
  if (!xi->rx_outstanding && xi->device_state != DEVICE_STATE_ACTIVE)
    KeSetEvent(&xi->rx_idle_event, IO_NO_INCREMENT, FALSE);
  //FUNCTION_EXIT();
}
#else
/* called at <= DISPATCH_LEVEL */
/* it's okay for return packet to be called while resume_state != RUNNING as the packet will simply be added back to the freelist, the grants will be fixed later */
VOID
XenNet_ReturnNetBufferLists(NDIS_HANDLE adapter_context, PNET_BUFFER_LIST curr_nbl, ULONG return_flags)
{
  struct xennet_info *xi = adapter_context;
  UNREFERENCED_PARAMETER(return_flags);

  //FUNCTION_ENTER();

  //KdPrint((__DRIVER_NAME " page_buf = %p\n", page_buf));

  XN_ASSERT(xi);
  while (curr_nbl)
  {
    PNET_BUFFER_LIST next_nbl;
    PNET_BUFFER curr_nb;

    next_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl);
    curr_nb = NET_BUFFER_LIST_FIRST_NB(curr_nbl);
    while (curr_nb)
    {
      PNET_BUFFER next_nb;
      PMDL curr_mdl;
      shared_buffer_t *page_buf;

      next_nb = NET_BUFFER_NEXT_NB(curr_nb);
      curr_mdl = NET_BUFFER_FIRST_MDL(curr_nb);
      page_buf = NB_FIRST_PB(curr_nb);
      while (curr_mdl)
      {
        shared_buffer_t *next_buf;
        PMDL next_mdl;

        XN_ASSERT(page_buf); /* make sure that there is a pb to match this mdl */
        next_mdl = curr_mdl->Next;
        next_buf = page_buf->next;
        if (!page_buf->virtual)
        {
          /* this is a hb not a pb because virtual is NULL (virtual is just the memory after the hb) */
          put_hb_on_freelist(xi, (shared_buffer_t *)MmGetMdlVirtualAddress(curr_mdl) - 1);
        }
        else
        {
          //KdPrint((__DRIVER_NAME " returning page_buf %p with id %d\n", page_buf, page_buf->id));
          if (curr_mdl != page_buf->mdl)
          {
            //KdPrint((__DRIVER_NAME " curr_mdl = %p, page_buf->mdl = %p\n", curr_mdl, page_buf->mdl));
            IoFreeMdl(curr_mdl);
          }
          put_pb_on_freelist(xi, page_buf);
        }
        curr_mdl = next_mdl;
        page_buf = next_buf;
      }

      NdisFreeNetBuffer(curr_nb);
      InterlockedDecrement(&xi->rx_outstanding);

      curr_nb = next_nb;
    }
    NdisFreeNetBufferList(curr_nbl);
    curr_nbl = next_nbl;
  }

  if (!xi->rx_outstanding && xi->device_state != DEVICE_STATE_ACTIVE)
    KeSetEvent(&xi->rx_idle_event, IO_NO_INCREMENT, FALSE);

  //FUNCTION_EXIT();
}
#endif
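
/*
 * Both return paths walk the packet's MDL chain and its shared_buffer_t
 * chain (PACKET_FIRST_PB / NB_FIRST_PB) in lockstep. A NULL ->virtual is
 * what distinguishes a header buffer from a page buffer: hbs are recycled
 * whole, while for pbs any partial MDL created in XenNet_MakePacket is
 * freed first and then the reference taken by ref_pb() is dropped.
 */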
/* We limit the number of packets per interrupt so that acks get a chance
under high rx load. The DPC is immediately re-scheduled */

#define MAXIMUM_PACKETS_PER_INDICATE 32

#define MAXIMUM_PACKETS_PER_INTERRUPT 2560 /* this is calculated before large packet split */
#define MAXIMUM_DATA_PER_INTERRUPT (MAXIMUM_PACKETS_PER_INTERRUPT * 1500) /* help account for large packets */
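
/*
 * Back-of-envelope budget: 2560 packets * 1500 bytes is 3,840,000 bytes, so
 * one DPC pass drains at most roughly 3.8 MB off the ring (whichever of the
 * packet or byte limit is hit first) before re-queueing itself and giving
 * pending transmits a chance to run.
 */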
// Called at DISPATCH_LEVEL
BOOLEAN
XenNet_RxBufferCheck(struct xennet_info *xi)
{
  RING_IDX cons, prod;
  ULONG packet_count = 0;
  ULONG packet_data = 0;
  ULONG buffer_count = 0;
  USHORT id;
  int more_to_do = FALSE;
  shared_buffer_t *page_buf;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packets[MAXIMUM_PACKETS_PER_INDICATE];
  PNDIS_PACKET first_header_only_packet;
  PNDIS_PACKET last_header_only_packet;
#else
#endif
  //ULONG nbl_count = 0;
  ULONG interim_packet_data = 0;
  struct netif_extra_info *ei;
  rx_context_t rc;
  packet_info_t *pi = &xi->rxpi[KeGetCurrentProcessorNumber() & 0xff];
  shared_buffer_t *head_buf = NULL;
  shared_buffer_t *tail_buf = NULL;
  shared_buffer_t *last_buf = NULL;
  BOOLEAN extra_info_flag = FALSE;
  BOOLEAN more_data_flag = FALSE;
  BOOLEAN dont_set_event;
  //FUNCTION_ENTER();

#if NTDDI_VERSION < NTDDI_VISTA
  rc.first_packet = NULL;
  rc.last_packet = NULL;
  rc.packet_count = 0;
#else
  rc.first_nbl = NULL;
  rc.last_nbl = NULL;
  rc.packet_count = 0;
  rc.nbl_count = 0;
#endif

  /* get all the buffers off the ring as quickly as possible so the lock is held for a minimum amount of time */
  KeAcquireSpinLockAtDpcLevel(&xi->rx_lock);

  if (xi->device_state != DEVICE_STATE_ACTIVE) {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);
    return FALSE;
  }

  if (xi->rx_partial_buf) {
    head_buf = xi->rx_partial_buf;
    tail_buf = xi->rx_partial_buf;
    while (tail_buf->next)
      tail_buf = tail_buf->next;
    more_data_flag = xi->rx_partial_more_data_flag;
    extra_info_flag = xi->rx_partial_extra_info_flag;
    xi->rx_partial_buf = NULL;
  }

  do {
    prod = xi->rx_ring.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'prod'. */

    for (cons = xi->rx_ring.rsp_cons; cons != prod && packet_count < MAXIMUM_PACKETS_PER_INTERRUPT && packet_data < MAXIMUM_DATA_PER_INTERRUPT; cons++) {
      id = (USHORT)(cons & (NET_RX_RING_SIZE - 1));
      page_buf = xi->rx_ring_pbs[id];
      XN_ASSERT(page_buf);
      xi->rx_ring_pbs[id] = NULL;
      xi->rx_id_free++;
      memcpy(&page_buf->rsp, RING_GET_RESPONSE(&xi->rx_ring, cons), max(sizeof(struct netif_rx_response), sizeof(struct netif_extra_info)));
      if (!extra_info_flag) {
        if (page_buf->rsp.status <= 0 || page_buf->rsp.offset + page_buf->rsp.status > PAGE_SIZE) {
          FUNCTION_MSG("Error: rsp offset %d, size %d\n",
            page_buf->rsp.offset, page_buf->rsp.status);
          XN_ASSERT(!extra_info_flag);
          put_pb_on_freelist(xi, page_buf);
          continue;
        }
      }

      if (!head_buf) {
        head_buf = page_buf;
        tail_buf = page_buf;
      } else {
        tail_buf->next = page_buf;
        tail_buf = page_buf;
      }
      page_buf->next = NULL;

      if (extra_info_flag) {
        ei = (struct netif_extra_info *)&page_buf->rsp;
        extra_info_flag = ei->flags & XEN_NETIF_EXTRA_FLAG_MORE;
      } else {
        more_data_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_more_data);
        extra_info_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_extra_info);
        interim_packet_data += page_buf->rsp.status;
      }

      if (!extra_info_flag && !more_data_flag) {
        last_buf = page_buf;
        packet_count++;
        packet_data += interim_packet_data;
        interim_packet_data = 0;
      }
      buffer_count++;
    }
    xi->rx_ring.rsp_cons = cons;

    /* Give netback more buffers */
    XenNet_FillRing(xi);

    if (packet_count >= MAXIMUM_PACKETS_PER_INTERRUPT || packet_data >= MAXIMUM_DATA_PER_INTERRUPT)
      break;

    more_to_do = RING_HAS_UNCONSUMED_RESPONSES(&xi->rx_ring);
    if (!more_to_do) {
      xi->rx_ring.sring->rsp_event = xi->rx_ring.rsp_cons + 1;
      KeMemoryBarrier();
      more_to_do = RING_HAS_UNCONSUMED_RESPONSES(&xi->rx_ring);
    }
  } while (more_to_do);

  /* anything past last_buf belongs to an incomplete packet... */
  if (last_buf && last_buf->next)
  {
    FUNCTION_MSG("Partial receive\n");
    xi->rx_partial_buf = last_buf->next;
    xi->rx_partial_more_data_flag = more_data_flag;
    xi->rx_partial_extra_info_flag = extra_info_flag;
    last_buf->next = NULL;
  }

  KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);
  if (packet_count >= MAXIMUM_PACKETS_PER_INTERRUPT || packet_data >= MAXIMUM_DATA_PER_INTERRUPT)
  {
    /* fire again immediately */
    FUNCTION_MSG("Dpc Duration Exceeded\n");
    /* we want the Dpc on the end of the queue. By definition we are already on the right CPU so we know the Dpc queue will be run immediately */
    // KeSetImportanceDpc(&xi->rxtx_dpc, MediumImportance);
    KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
    /* don't set an event in TX path */
    dont_set_event = TRUE;
  }
  else
  {
    /* make sure the Dpc queue is run immediately next interrupt */
    // KeSetImportanceDpc(&xi->rxtx_dpc, HighImportance);
    /* set an event in TX path */
    dont_set_event = FALSE;
  }

  /* make packets out of the buffers */
  page_buf = head_buf;
  extra_info_flag = FALSE;
  more_data_flag = FALSE;

  while (page_buf) {
    shared_buffer_t *next_buf = page_buf->next;
    PMDL mdl;

    page_buf->next = NULL;
    if (extra_info_flag) {
      //KdPrint((__DRIVER_NAME " processing extra info\n"));
      ei = (struct netif_extra_info *)&page_buf->rsp;
      extra_info_flag = ei->flags & XEN_NETIF_EXTRA_FLAG_MORE;
      switch (ei->type) {
      case XEN_NETIF_EXTRA_TYPE_GSO:
        switch (ei->u.gso.type) {
        case XEN_NETIF_GSO_TYPE_TCPV4:
          pi->mss = ei->u.gso.size;
          // TODO - put this assertion somewhere XN_ASSERT(header_len + pi->mss <= PAGE_SIZE); // this limits MTU to PAGE_SIZE - XN_HEADER_LEN
          break;
        default:
          FUNCTION_MSG("Unknown GSO type (%d) detected\n", ei->u.gso.type);
          break;
        }
        break;
      default:
        FUNCTION_MSG("Unknown extra info type (%d) detected\n", ei->type);
        break;
      }
      put_pb_on_freelist(xi, page_buf);
    } else {
      XN_ASSERT(!page_buf->rsp.offset);
      if (!more_data_flag) { // handling the packet's 1st buffer
        if (page_buf->rsp.flags & NETRXF_csum_blank)
          pi->csum_blank = TRUE;
        if (page_buf->rsp.flags & NETRXF_data_validated)
          pi->data_validated = TRUE;
      }
      mdl = page_buf->mdl;
      mdl->ByteCount = page_buf->rsp.status; //NdisAdjustBufferLength(mdl, page_buf->rsp.status);
      //KdPrint((__DRIVER_NAME " buffer = %p, pb = %p\n", buffer, page_buf));
      if (pi->first_pb) {
        XN_ASSERT(pi->curr_pb);
        //KdPrint((__DRIVER_NAME " additional buffer\n"));
        pi->curr_pb->next = page_buf;
        pi->curr_pb = page_buf;
        XN_ASSERT(pi->curr_mdl);
        pi->curr_mdl->Next = mdl;
        pi->curr_mdl = mdl;
      } else {
        pi->first_pb = page_buf;
        pi->curr_pb = page_buf;
        pi->first_mdl = mdl;
        pi->curr_mdl = mdl;
      }
      //pi->mdl_count++;
      extra_info_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_extra_info);
      more_data_flag = (BOOLEAN)(page_buf->rsp.flags & NETRXF_more_data);
      pi->total_length = pi->total_length + page_buf->rsp.status;
    }

    /* Packet done, add it to the list */
    if (!more_data_flag && !extra_info_flag) {
      pi->curr_pb = pi->first_pb;
      pi->curr_mdl = pi->first_mdl;
      XenNet_MakePackets(xi, &rc, pi);
    }

    page_buf = next_buf;
  }
  XN_ASSERT(!more_data_flag && !extra_info_flag);

#if NTDDI_VERSION < NTDDI_VISTA
  packet_count = 0;
  first_header_only_packet = NULL;
  last_header_only_packet = NULL;

  while (rc.first_packet) {
    PNDIS_PACKET packet;
    NDIS_STATUS status;

    packet = rc.first_packet;
    XN_ASSERT(PACKET_FIRST_PB(packet));
    rc.first_packet = PACKET_NEXT_PACKET(packet);
    status = NDIS_GET_PACKET_STATUS(packet);
    if (status == NDIS_STATUS_RESOURCES) {
      if (!first_header_only_packet) {
        first_header_only_packet = packet;
      } else {
        PACKET_NEXT_PACKET(last_header_only_packet) = packet;
      }
      last_header_only_packet = packet;
      PACKET_NEXT_PACKET(packet) = NULL;
    }
    packets[packet_count++] = packet;
    /* if we indicate a packet with NDIS_STATUS_RESOURCES then any following packet can't be NDIS_STATUS_SUCCESS */
    if (packet_count == MAXIMUM_PACKETS_PER_INDICATE || !rc.first_packet
        || (NDIS_GET_PACKET_STATUS(rc.first_packet) == NDIS_STATUS_SUCCESS
        && status == NDIS_STATUS_RESOURCES)) {
      NdisMIndicateReceivePacket(xi->adapter_handle, packets, packet_count);
      packet_count = 0;
    }
  }
  /* now return the packets for which we indicated NDIS_STATUS_RESOURCES */
  while (first_header_only_packet) {
    PNDIS_PACKET packet = first_header_only_packet;
    first_header_only_packet = PACKET_NEXT_PACKET(packet);
    XenNet_ReturnPacket(xi, packet);
  }
#else
  if (rc.first_nbl) {
    NdisMIndicateReceiveNetBufferLists(xi->adapter_handle, rc.first_nbl,
      NDIS_DEFAULT_PORT_NUMBER, rc.nbl_count,
      NDIS_RECEIVE_FLAGS_DISPATCH_LEVEL
      //| NDIS_RECEIVE_FLAGS_SINGLE_ETHER_TYPE
      | NDIS_RECEIVE_FLAGS_PERFECT_FILTERED);
  }
#endif
  //FUNCTION_EXIT();
  return dont_set_event;
}
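
/*
 * The NDIS5 indicate loop above flushes its batch early whenever a packet
 * marked NDIS_STATUS_RESOURCES would otherwise be followed in the same
 * NdisMIndicateReceivePacket call by a NDIS_STATUS_SUCCESS packet: with
 * RESOURCES the miniport regains ownership as soon as the call returns, so
 * such packets must not precede ones NDIS is allowed to keep. The reclaimed
 * packets are then handed back through XenNet_ReturnPacket by the driver
 * itself.
 */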
static VOID
XenNet_BufferFree(xennet_info_t *xi)
{
  shared_buffer_t *sb;
  int i;

  for (i = 0; i < NET_RX_RING_SIZE; i++) {
    if (xi->rx_ring_pbs[i] != NULL) {
      put_pb_on_freelist(xi, xi->rx_ring_pbs[i]);
      xi->rx_ring_pbs[i] = NULL;
    }
  }

  /* because we are shutting down this won't allocate new ones */
  while ((sb = get_pb_from_freelist(xi)) != NULL) {
    XnEndAccess(xi->handle,
      sb->gref, FALSE, (ULONG)'XNRX');
    IoFreeMdl(sb->mdl);
    ExFreePoolWithTag(sb->virtual, XENNET_POOL_TAG);
    ExFreePoolWithTag(sb, XENNET_POOL_TAG);
  }
  while ((sb = get_hb_from_freelist(xi)) != NULL) {
    IoFreeMdl(sb->mdl);
    ExFreePoolWithTag(sb, XENNET_POOL_TAG);
  }
}
BOOLEAN
XenNet_RxInit(xennet_info_t *xi) {
#if NTDDI_VERSION < NTDDI_VISTA
  NDIS_STATUS status;
#else
  NET_BUFFER_LIST_POOL_PARAMETERS nbl_pool_parameters;
  NET_BUFFER_POOL_PARAMETERS nb_pool_parameters;
#endif
  int ret;
  int i;

  FUNCTION_ENTER();

  // this stuff needs to be done once only...
  KeInitializeSpinLock(&xi->rx_lock);
  KeInitializeEvent(&xi->rx_idle_event, SynchronizationEvent, FALSE);
  xi->rxpi = ExAllocatePoolWithTagPriority(NonPagedPool, sizeof(packet_info_t) * NdisSystemProcessorCount(), XENNET_POOL_TAG, NormalPoolPriority);
  if (!xi->rxpi) {
    FUNCTION_MSG("ExAllocatePoolWithTagPriority failed\n");
    return FALSE;
  }
  NdisZeroMemory(xi->rxpi, sizeof(packet_info_t) * NdisSystemProcessorCount());

  ret = stack_new(&xi->rx_pb_stack, NET_RX_RING_SIZE * 4);
  if (!ret) {
    FUNCTION_MSG("Failed to allocate rx_pb_stack\n");
    ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);
    return FALSE;
  }
  ret = stack_new(&xi->rx_hb_stack, NET_RX_RING_SIZE * 4);
  if (!ret) {
    FUNCTION_MSG("Failed to allocate rx_hb_stack\n");
    stack_delete(xi->rx_pb_stack, NULL, NULL);
    ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);
    return FALSE;
  }

  xi->rx_id_free = NET_RX_RING_SIZE;
  xi->rx_outstanding = 0;

  for (i = 0; i < NET_RX_RING_SIZE; i++) {
    xi->rx_ring_pbs[i] = NULL;
  }

#if NTDDI_VERSION < NTDDI_VISTA
  NdisAllocatePacketPool(&status, &xi->rx_packet_pool, NET_RX_RING_SIZE * 4, PROTOCOL_RESERVED_SIZE_IN_PACKET);
  if (status != NDIS_STATUS_SUCCESS) {
    FUNCTION_MSG("NdisAllocatePacketPool failed with 0x%x\n", status);
    return FALSE;
  }
#else
  nbl_pool_parameters.Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
  nbl_pool_parameters.Header.Revision = NET_BUFFER_LIST_POOL_PARAMETERS_REVISION_1;
  nbl_pool_parameters.Header.Size = NDIS_SIZEOF_NET_BUFFER_LIST_POOL_PARAMETERS_REVISION_1;
  nbl_pool_parameters.ProtocolId = NDIS_PROTOCOL_ID_DEFAULT;
  nbl_pool_parameters.fAllocateNetBuffer = FALSE;
  nbl_pool_parameters.ContextSize = 0;
  nbl_pool_parameters.PoolTag = XENNET_POOL_TAG;
  nbl_pool_parameters.DataSize = 0; /* NET_BUFFERS are always allocated separately */

  xi->rx_nbl_pool = NdisAllocateNetBufferListPool(xi->adapter_handle, &nbl_pool_parameters);
  if (!xi->rx_nbl_pool) {
    FUNCTION_MSG("NdisAllocateNetBufferListPool failed\n");
    return FALSE;
  }

  nb_pool_parameters.Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
  nb_pool_parameters.Header.Revision = NET_BUFFER_POOL_PARAMETERS_REVISION_1;
  nb_pool_parameters.Header.Size = NDIS_SIZEOF_NET_BUFFER_POOL_PARAMETERS_REVISION_1;
  nb_pool_parameters.PoolTag = XENNET_POOL_TAG;
  nb_pool_parameters.DataSize = 0; /* the buffers come from the ring */
  xi->rx_packet_pool = NdisAllocateNetBufferPool(xi->adapter_handle, &nb_pool_parameters);
  if (!xi->rx_packet_pool) {
    FUNCTION_MSG("NdisAllocateNetBufferPool (rx_packet_pool) failed\n");
    return FALSE;
  }
#endif
  XenNet_FillRing(xi);

  FUNCTION_EXIT();

  return TRUE;
}
VOID
XenNet_RxShutdown(xennet_info_t *xi) {
  KIRQL old_irql;
  UNREFERENCED_PARAMETER(xi);

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->rx_lock, &old_irql);
  while (xi->rx_outstanding) {
    FUNCTION_MSG("Waiting for %d packets to be returned\n", xi->rx_outstanding);
    KeReleaseSpinLock(&xi->rx_lock, old_irql);
    KeWaitForSingleObject(&xi->rx_idle_event, Executive, KernelMode, FALSE, NULL);
    KeAcquireSpinLock(&xi->rx_lock, &old_irql);
  }
  KeReleaseSpinLock(&xi->rx_lock, old_irql);

  XenNet_BufferFree(xi);

  stack_delete(xi->rx_pb_stack, NULL, NULL);
  stack_delete(xi->rx_hb_stack, NULL, NULL);

  ExFreePoolWithTag(xi->rxpi, XENNET_POOL_TAG);

#if NTDDI_VERSION < NTDDI_VISTA
  NdisFreePacketPool(xi->rx_packet_pool);
#else
  NdisFreeNetBufferPool(xi->rx_packet_pool);
  NdisFreeNetBufferListPool(xi->rx_nbl_pool);
#endif

  FUNCTION_EXIT();
  return;
}
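
/*
 * Shutdown handshake: XenNet_ReturnPacket / XenNet_ReturnNetBufferLists set
 * rx_idle_event when rx_outstanding drops to zero while the device is no
 * longer active, and XenNet_RxShutdown above drops the rx lock around each
 * wait so those returns can make progress. Only once every indicated packet
 * is back does XenNet_BufferFree tear down the grants, MDLs and pool
 * allocations.
 */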