win-pvdrivers

view xennet/xennet_rx.c @ 244:d31884ed02a9

tweaked a few things to make crashes go away
author James Harper <james.harper@bendigoit.com.au>
date Wed Apr 02 00:21:09 2008 +1100 (2008-04-02)
parents dffb6524631b
children 1b1f26917b6f
line source
1 /*
2 PV Net Driver for Windows Xen HVM Domains
3 Copyright (C) 2007 James Harper
4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
21 #include "xennet.h"
23 static PMDL
24 get_page_from_freelist(struct xennet_info *xi)
25 {
26 PMDL mdl;
28 // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
30 if (xi->page_free == 0)
31 {
32 mdl = AllocatePagesExtra(1, sizeof(grant_ref_t));
33 *(grant_ref_t *)(((UCHAR *)mdl) + MmSizeOfMdl(0, PAGE_SIZE)) = xi->XenInterface.GntTbl_GrantAccess(
34 xi->XenInterface.InterfaceHeader.Context, 0,
35 *MmGetMdlPfnArray(mdl), FALSE, 0);
36 // KdPrint(("New Mdl = %p, MmGetMdlVirtualAddress = %p, MmGetSystemAddressForMdlSafe = %p\n",
37 // mdl, MmGetMdlVirtualAddress(mdl), MmGetSystemAddressForMdlSafe(mdl, NormalPagePriority)));
38 }
39 else
40 {
41 xi->page_free--;
42 mdl = xi->page_list[xi->page_free];
43 // KdPrint(("Old Mdl = %p, MmGetMdlVirtualAddress = %p, MmGetSystemAddressForMdlSafe = %p\n",
44 // mdl, MmGetMdlVirtualAddress(mdl), MmGetSystemAddressForMdlSafe(mdl, NormalPagePriority)));
45 }
47 // KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
49 return mdl;
50 }
52 static VOID
53 free_page_freelist(struct xennet_info *xi)
54 {
55 PMDL mdl;
56 // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
58 while(xi->page_free != 0)
59 {
60 xi->page_free--;
61 mdl = xi->page_list[xi->page_free];
62 xi->XenInterface.GntTbl_EndAccess(xi->XenInterface.InterfaceHeader.Context,
63 *(grant_ref_t *)(((UCHAR *)mdl) + MmSizeOfMdl(0, PAGE_SIZE)), 0);
64 FreePages(mdl);
65 }
66 }
68 static VOID
69 put_page_on_freelist(struct xennet_info *xi, PMDL mdl)
70 {
71 // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
73 // KdPrint(("Mdl = %p\n", mdl));
75 xi->page_list[xi->page_free] = mdl;
76 xi->page_free++;
78 // KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
79 }
81 static __inline grant_ref_t
82 get_grant_ref(PMDL mdl)
83 {
84 return *(grant_ref_t *)(((UCHAR *)mdl) + MmSizeOfMdl(0, PAGE_SIZE));
85 }
// Called at DISPATCH_LEVEL with rx lock held
//
// Replenish the receive ring: take granted pages from the free list and
// post them to netback as rx requests, aiming for rx_target outstanding
// buffers. Stops early if free ring ids or free pages run out. Notifies
// the backend over the event channel only when the ring macro says the
// backend needs waking.
static NDIS_STATUS
XenNet_RxBufferAlloc(struct xennet_info *xi)
{
  unsigned short id;
  PMDL mdl;
  int i, batch_target, notify;
  RING_IDX req_prod = xi->rx.req_prod_pvt;
  netif_rx_request_t *req;
  int cycles = 0; // runaway-loop guard, checked by the ASSERT below
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
#endif

#if defined(XEN_PROFILE)
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  // how many requests are needed to reach rx_target outstanding
  batch_target = xi->rx_target - (req_prod - xi->rx.rsp_cons);

  for (i = 0; i < batch_target; i++)
  {
    ASSERT(cycles++ < 256);
    if (xi->rx_id_free == 0)
      break; // no free ring ids left
    mdl = get_page_from_freelist(xi);
    if (mdl == NULL)
    {
      KdPrint((__DRIVER_NAME " Added %d out of %d buffers to rx ring\n", i, batch_target));
      break;
    }
    xi->rx_id_free--;

    /* Give to netback */
    id = (USHORT)((req_prod + i) & (NET_RX_RING_SIZE - 1));
    ASSERT(xi->rx_buffers[id] == NULL);
    xi->rx_buffers[id] = mdl; // remember the page so the response handler can find it
    req = RING_GET_REQUEST(&xi->rx, req_prod + i);
    req->gref = get_grant_ref(mdl);
    req->id = id;
  }

  xi->rx.req_prod_pvt = req_prod + i;
  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->rx, notify);
  if (notify)
  {
    xi->XenInterface.EvtChn_Notify(xi->XenInterface.InterfaceHeader.Context,
      xi->event_channel);
  }

#if defined(XEN_PROFILE)
  ProfTime_RxBufferAlloc.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_RxBufferAlloc++;
#endif

  return NDIS_STATUS_SUCCESS;
}
/* Network-byte-order (big-endian) field accessors.
   When x is the first byte of a buffer field (an addressable lvalue),
   BYTE_0 reads that byte's value and BYTE_1 reads the following byte
   through x's address, so GET_NET_USHORT combines two consecutive memory
   bytes MSB-first. When an integer variable is passed instead (as the
   SET_* callers do with csum etc.), the two lowest-addressed bytes of its
   representation are read — a little-endian host is assumed there.
   NOTE(review): these macros take &x — only pass addressable lvalues,
   never expressions with side effects. GET_NET_ULONG reads 4 consecutive
   bytes starting at x. */
#define __NET_USHORT_BYTE_0(x) ((USHORT)(x & 0xFF))
#define __NET_USHORT_BYTE_1(x) ((USHORT)((PUCHAR)&x)[1] & 0xFF)

#define GET_NET_USHORT(x) ((__NET_USHORT_BYTE_0(x) << 8) | __NET_USHORT_BYTE_1(x))
#define SET_NET_USHORT(y, x) *((USHORT *)&(y)) = ((__NET_USHORT_BYTE_0(x) << 8) | __NET_USHORT_BYTE_1(x))

#define GET_NET_ULONG(x) ((GET_NET_USHORT(x) << 16) | GET_NET_USHORT(((PUCHAR)&x)[2]))
#define SET_NET_ULONG(y, x) *((ULONG *)&(y)) = ((GET_NET_USHORT(x) << 16) | GET_NET_USHORT(((PUCHAR)&x)[2]))
/*
 * Inspect the headers in the first rx buffer and fill in xi->rxpi:
 * ip_version, ip4_header_length, ip_proto, ip4_length, tcp_header_length,
 * tcp_length/tcp_remaining, tcp_seq, and split_required (set when this is
 * a TCP frame larger than the GSO mss advertised by the backend and must
 * be re-split before being indicated to NDIS).
 * Returns early — leaving rxpi only partially filled — for short first
 * buffers, non-IPv4 frames, and protocols other than TCP(6)/UDP(17).
 */
static VOID
XenNet_ParseHeader(
  struct xennet_info *xi
)
{
  UINT header_length;

  ASSERT(xi->rxpi.mdls[0]);

  NdisQueryBufferSafe(xi->rxpi.mdls[0], &xi->rxpi.header, &header_length, NormalPagePriority);

  if (header_length < XN_HDR_SIZE + 20 + 20) // minimum size of first buffer is ETH + IP + TCP header
  {
    return;
  }

  switch (GET_NET_USHORT(xi->rxpi.header[12])) // L2 protocol field
  {
  case 0x0800: // IPv4
    xi->rxpi.ip_version = (xi->rxpi.header[XN_HDR_SIZE + 0] & 0xF0) >> 4;
    if (xi->rxpi.ip_version != 4)
    {
      KdPrint((__DRIVER_NAME " ip_version = %d\n", xi->rxpi.ip_version));
      return;
    }
    xi->rxpi.ip4_header_length = (xi->rxpi.header[XN_HDR_SIZE + 0] & 0x0F) << 2; // IHL is in 32-bit words
    // NOTE(review): header_length includes the ethernet header but this
    // bound omits XN_HDR_SIZE — the check looks looser than intended
    // (and looser than the KdPrint message implies); confirm.
    if (header_length < (ULONG)(xi->rxpi.ip4_header_length + 20))
    {
      KdPrint((__DRIVER_NAME " first packet is only %d long, must be >= %d\n", XN_HDR_SIZE + header_length, (ULONG)(XN_HDR_SIZE + xi->rxpi.ip4_header_length + 20)));
      // we need to do something conclusive here...
      return;
    }
    break;
  default:
    return; // not IP
  }
  xi->rxpi.ip_proto = xi->rxpi.header[XN_HDR_SIZE + 9]; // IPv4 protocol field
  switch (xi->rxpi.ip_proto)
  {
  case 6:  // TCP
  case 17: // UDP
    break;
  default:
    return;
  }
  xi->rxpi.ip4_length = GET_NET_USHORT(xi->rxpi.header[XN_HDR_SIZE + 2]); // IP total length
  // TCP data offset is in 32-bit words: (byte >> 4) * 4 == (byte & 0xf0) >> 2.
  // NOTE(review): this and the fields below are also computed for UDP
  // frames, where offset 12 is not a data-offset field — confirm harmless.
  xi->rxpi.tcp_header_length = (xi->rxpi.header[XN_HDR_SIZE + xi->rxpi.ip4_header_length + 12] & 0xf0) >> 2;
  xi->rxpi.tcp_length = xi->rxpi.ip4_length - xi->rxpi.ip4_header_length - xi->rxpi.tcp_header_length;
  xi->rxpi.tcp_remaining = xi->rxpi.tcp_length;
  xi->rxpi.tcp_seq = GET_NET_ULONG(xi->rxpi.header[XN_HDR_SIZE + xi->rxpi.ip4_header_length + 4]);
  if (xi->rxpi.mss > 0 && xi->rxpi.tcp_length > xi->rxpi.mss)
    xi->rxpi.split_required = TRUE; // payload exceeds GSO mss — split before indicating
}
/*
 * Recompute the IPv4 header checksum in-place in the packet's first
 * buffer: zero the checksum field, sum the header as big-endian 16-bit
 * words with ones-complement carry folding, and store the complement.
 * Assumes the whole IP header lies within the first NDIS buffer.
 */
static VOID
XenNet_SumIpHeader(
  struct xennet_info *xi,
  PNDIS_PACKET packet
)
{
  PMDL mdl;
  UINT total_length;
  UINT buffer_length;
  PUCHAR buffer;
  ULONG csum = 0;
  USHORT i;

  NdisGetFirstBufferFromPacketSafe(packet, &mdl, &buffer, &buffer_length, &total_length, NormalPagePriority);
  ASSERT(mdl);

  // the checksum field itself must be zero while summing
  buffer[XN_HDR_SIZE + 10] = 0;
  buffer[XN_HDR_SIZE + 11] = 0;
  for (i = 0; i < xi->rxpi.ip4_header_length; i += 2)
  {
    csum += GET_NET_USHORT(buffer[XN_HDR_SIZE + i]);
  }
  // fold carries back into the low 16 bits (ones-complement addition)
  while (csum & 0xFFFF0000)
    csum = (csum & 0xFFFF) + (csum >> 16);
  csum = ~csum;
  SET_NET_USHORT(buffer[XN_HDR_SIZE + 10], csum);
}
/*
 Windows appears to insist that the checksum on received packets is correct, and won't
 believe us when we lie about it, which happens when the packet is generated on the
 same bridge in Dom0. Doh!
 This is only for TCP and UDP packets. IP checksums appear to be correct anyways.
*/
/*
 * Recompute the TCP (proto 6) or UDP (proto 17) checksum over the whole
 * packet: IPv4 pseudo-header plus L4 header and payload, walking the NDIS
 * buffer chain and coping with buffers that end on an odd byte boundary.
 * Assumes the IP header and the L4 checksum field are in the first buffer.
 */
static VOID
XenNet_SumPacketData(
  struct xennet_info *xi,
  PNDIS_PACKET packet
)
{
  USHORT i;
  PUCHAR buffer;
  PMDL mdl;
  UINT total_length;
  UINT buffer_length;
  USHORT buffer_offset;
  ULONG csum;
  PUSHORT csum_ptr;
  USHORT remaining;
  USHORT ip4_length;

#if defined(XEN_PROFILE)
  ProfCount_RxPacketsCsumOffload++;
#endif

  NdisGetFirstBufferFromPacketSafe(packet, &mdl, &buffer, &buffer_length, &total_length, NormalPagePriority);
  ASSERT(mdl);

  ip4_length = GET_NET_USHORT(buffer[XN_HDR_SIZE + 2]);

  // sanity check only — processing continues even on a mismatch
  if ((USHORT)(ip4_length + XN_HDR_SIZE) != total_length)
  {
    KdPrint((__DRIVER_NAME " Size Mismatch %d (ip4_length + XN_HDR_SIZE) != %d (total_length)\n", ip4_length + XN_HDR_SIZE, total_length));
  }

  // locate the L4 checksum field (TCP: header offset 16, UDP: offset 6)
  switch (xi->rxpi.ip_proto)
  {
  case 6:
    csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + xi->rxpi.ip4_header_length + 16];
    break;
  case 17:
    csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + xi->rxpi.ip4_header_length + 6];
    break;
  default:
    KdPrint((__DRIVER_NAME " Don't know how to calc sum for IP Proto %d\n", xi->rxpi.ip_proto));
    return;
  }

  *csum_ptr = 0; // checksum field must be zero while summing

  // pseudo-header: source IP, destination IP, protocol, L4 length
  csum = 0;
  csum += GET_NET_USHORT(buffer[XN_HDR_SIZE + 12]) + GET_NET_USHORT(buffer[XN_HDR_SIZE + 14]); // src
  csum += GET_NET_USHORT(buffer[XN_HDR_SIZE + 16]) + GET_NET_USHORT(buffer[XN_HDR_SIZE + 18]); // dst
  csum += ((USHORT)buffer[XN_HDR_SIZE + 9]); // protocol byte

  remaining = ip4_length - xi->rxpi.ip4_header_length; // L4 header + payload length

  csum += remaining;

  // sum the L4 header and payload as big-endian 16-bit words, following
  // the buffer chain; i tracks the logical packet offset, buffer_offset
  // the offset within the current buffer
  for (buffer_offset = i = XN_HDR_SIZE + xi->rxpi.ip4_header_length; i < total_length - 1; i += 2, buffer_offset += 2)
  {
    if (buffer_offset == buffer_length - 1) // deal with a buffer ending on an odd byte boundary
    {
      csum += (USHORT)buffer[buffer_offset] << 8; // high byte from this buffer...
      NdisGetNextBuffer(mdl, &mdl);
      if (mdl == NULL)
      {
        KdPrint((__DRIVER_NAME " Ran out of buffers\n"));
        return;
      }
      NdisQueryBufferSafe(mdl, &buffer, &buffer_length, NormalPagePriority);
      csum += ((USHORT)buffer[0]); // ...low byte from the next one
      buffer_offset = (USHORT)-1;  // so the loop's += 2 resumes at offset 1
    }
    else
    {
      if (buffer_offset == buffer_length) // buffer ended on an even boundary
      {
        NdisGetNextBuffer(mdl, &mdl);
        if (mdl == NULL)
        {
          KdPrint((__DRIVER_NAME " Ran out of buffers\n"));
          return;
        }
        NdisQueryBufferSafe(mdl, &buffer, &buffer_length, NormalPagePriority);
        buffer_offset = 0;
      }
      csum += GET_NET_USHORT(buffer[buffer_offset]);
    }
  }
  if (i != total_length) // last odd byte
  {
    csum += ((USHORT)buffer[buffer_offset] << 8);
  }
  // fold carries (ones-complement addition), complement, and store the
  // result byte-swapped into network order via GET_NET_USHORT
  while (csum & 0xFFFF0000)
    csum = (csum & 0xFFFF) + (csum >> 16);
  *csum_ptr = (USHORT)~GET_NET_USHORT(csum);
}
355 static PUCHAR
356 XenNet_GetData(
357 struct xennet_info *xi,
358 USHORT req_length,
359 PUSHORT length
360 )
361 {
362 PNDIS_BUFFER mdl = xi->rxpi.mdls[xi->rxpi.curr_mdl];
363 PUCHAR buffer = (PUCHAR)MmGetMdlVirtualAddress(mdl) + xi->rxpi.curr_mdl_offset;
365 // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
367 *length = (USHORT)min(req_length, MmGetMdlByteCount(mdl) - xi->rxpi.curr_mdl_offset);
369 // KdPrint((__DRIVER_NAME " req_length = %d, length = %d\n", req_length, *length));
371 xi->rxpi.curr_mdl_offset = xi->rxpi.curr_mdl_offset + *length;
372 if (xi->rxpi.curr_mdl_offset == MmGetMdlByteCount(mdl))
373 {
374 xi->rxpi.curr_mdl++;
375 xi->rxpi.curr_mdl_offset = 0;
376 }
378 // KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
380 return buffer;
381 }
/*
 * Build one NDIS_PACKET from the rx state accumulated in xi->rxpi.
 *
 * Non-split path: chain the received MDLs straight onto the packet.
 * Split (GSO) path: take a fresh page, copy the ETH+IP+TCP headers from
 * rxpi.header, patch the IP total length and TCP sequence number for this
 * segment, copy up to mss bytes of payload from the source MDL chain, and
 * recompute the IP header checksum.
 *
 * Increments xi->rx_outstanding (balanced by XenNet_ReturnPacket).
 * NOTE(review): NdisAllocatePacket status is only ASSERTed and the
 * split-path get_page_from_freelist result is not NULL-checked —
 * allocation failure would crash a release build; callers don't handle a
 * NULL return, so this can't be fixed locally.
 */
static PNDIS_PACKET
XenNet_MakePacket(
  struct xennet_info *xi
)
{
  PNDIS_PACKET packet;
  PUCHAR in_buffer;
  PNDIS_BUFFER out_mdl;
  PUCHAR out_buffer;
  USHORT out_offset;
  USHORT out_remaining;
  USHORT length;
  USHORT new_ip4_length;
  NDIS_STATUS status;
  USHORT i;

  NdisAllocatePacket(&status, &packet, xi->packet_pool);
  ASSERT(status == NDIS_STATUS_SUCCESS);
  xi->rx_outstanding++;
  NDIS_SET_PACKET_HEADER_SIZE(packet, XN_HDR_SIZE);

  if (!xi->rxpi.split_required)
  {
    // simple case: hand the received pages to NDIS as-is
    for (i = 0; i < xi->rxpi.mdl_count; i++)
      NdisChainBufferAtBack(packet, xi->rxpi.mdls[i]);
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
  }
  else
  {
    // GSO split: copy headers plus one mss-sized chunk of payload into a
    // fresh page, patched to look like an independent TCP segment
    out_mdl = get_page_from_freelist(xi);
    out_buffer = MmGetMdlVirtualAddress(out_mdl);
    out_offset = XN_HDR_SIZE + xi->rxpi.ip4_header_length + xi->rxpi.tcp_header_length;
    out_remaining = min(xi->rxpi.mss, xi->rxpi.tcp_remaining);
    NdisAdjustBufferLength(out_mdl, out_offset + out_remaining);
    memcpy(out_buffer, xi->rxpi.header, out_offset); // ETH + IP + TCP headers
    new_ip4_length = out_remaining + xi->rxpi.ip4_header_length + xi->rxpi.tcp_header_length;
    SET_NET_USHORT(out_buffer[XN_HDR_SIZE + 2], new_ip4_length);
    SET_NET_ULONG(out_buffer[XN_HDR_SIZE + xi->rxpi.ip4_header_length + 4], xi->rxpi.tcp_seq);
    xi->rxpi.tcp_seq += out_remaining;
    xi->rxpi.tcp_remaining = xi->rxpi.tcp_remaining - out_remaining;
    // pull payload from the source MDL chain until this segment is full
    do
    {
      ASSERT(xi->rxpi.curr_mdl < xi->rxpi.mdl_count);
      in_buffer = XenNet_GetData(xi, out_remaining, &length);
      memcpy(&out_buffer[out_offset], in_buffer, length);
      out_remaining = out_remaining - length;
      out_offset = out_offset + length;
    } while (out_remaining != 0); // && in_buffer != NULL);
    NdisChainBufferAtBack(packet, out_mdl);
    XenNet_SumIpHeader(xi, packet); // IP length changed, so redo the header checksum
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
  }

  return packet;
}
/*
 * Convert the fully-received frame described by xi->rxpi into one or more
 * NDIS packets, appended to `packets` with *packet_count_p advanced past
 * each. A TCP frame needing a GSO split yields one packet per mss-sized
 * segment, each with its data checksum recomputed; everything else
 * becomes a single packet (checksummed only when the backend flagged
 * csum_blank). After a split, the source pages are returned to the free
 * list since their data was copied rather than chained.
 */
static VOID
XenNet_MakePackets(
  struct xennet_info *xi,
  PNDIS_PACKET *packets,
  PULONG packet_count_p
)
{
  USHORT i;

  XenNet_ParseHeader(xi);
  switch (xi->rxpi.ip_proto)
  {
  case 6: // TCP
    if (xi->rxpi.split_required)
      break; // fall out to the GSO split path below
    // fallthrough
  case 17: // UDP
    packets[*packet_count_p] = XenNet_MakePacket(xi);
    if (xi->rxpi.csum_calc_required)
      XenNet_SumPacketData(xi, packets[*packet_count_p]);
    (*packet_count_p)++;
    return;
  default:
    // non-TCP/UDP (or unparseable) frame: indicate as a single packet
    packets[*packet_count_p] = XenNet_MakePacket(xi);
    (*packet_count_p)++;
    return;
  }
  // TCP GSO split: emit one packet per mss-sized segment
  xi->rxpi.tcp_remaining = xi->rxpi.tcp_length;
  // position the payload cursor just past the ETH+IP+TCP headers
  if (MmGetMdlByteCount(xi->rxpi.mdls[0]) > (ULONG)(XN_HDR_SIZE + xi->rxpi.ip4_header_length + xi->rxpi.tcp_header_length))
    xi->rxpi.curr_mdl_offset = XN_HDR_SIZE + xi->rxpi.ip4_header_length + xi->rxpi.tcp_header_length;
  else
    xi->rxpi.curr_mdl = 1; // headers fill the whole first MDL

  while (xi->rxpi.tcp_remaining)
  {
    packets[*packet_count_p] = XenNet_MakePacket(xi);
    XenNet_SumPacketData(xi, packets[*packet_count_p]);
    (*packet_count_p)++;
  }
  ASSERT(xi->rxpi.curr_mdl == xi->rxpi.mdl_count);
  // TODO: restore psh status to last packet
  // segments hold copies of the data, so the source pages can be recycled
  for (i = 0; i < xi->rxpi.mdl_count; i++)
  {
    NdisAdjustBufferLength(xi->rxpi.mdls[i], PAGE_SIZE);
    put_page_on_freelist(xi, xi->rxpi.mdls[i]);
  }
}
// Called at DISPATCH_LEVEL
//
// DPC-side rx processing: drain all responses from the rx ring under the
// rx lock, gathering each frame's fragments (and any extra-info slots,
// e.g. GSO metadata) into xi->rxpi; when a frame is complete, turn it
// into NDIS packets. Afterwards the ring is re-stocked, the lock is
// released, and the packets are indicated to NDIS.
NDIS_STATUS
XenNet_RxBufferCheck(struct xennet_info *xi)
{
  RING_IDX cons, prod;
  PNDIS_PACKET packets[NET_RX_RING_SIZE];
  ULONG packet_count;
  PMDL mdl;
  int moretodo;
  struct netif_rx_response *rxrsp = NULL;
  struct netif_extra_info *ei;
  USHORT id;
  int cycles = 0; // runaway-loop guard
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, tsc2, dummy;
#endif

#if defined(XEN_PROFILE)
  tsc = tsc2 = KeQueryPerformanceCounter(&dummy);
#endif

  ASSERT(xi->connected);

  KeAcquireSpinLockAtDpcLevel(&xi->rx_lock);

  packet_count = 0;
  do {
    ASSERT(cycles++ < 256);
    prod = xi->rx.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rp'. */

    for (cons = xi->rx.rsp_cons; cons != prod; cons++)
    {
      ASSERT(cycles++ < 256);
      id = (USHORT)(cons & (NET_RX_RING_SIZE - 1));
      mdl = xi->rx_buffers[id];
      xi->rx_buffers[id] = NULL; // slot is free again
      xi->rx_id_free++;
      if (xi->rxpi.extra_info)
      {
        // the previous response flagged NETRXF_extra_info: this slot
        // carries metadata, not packet data, so its page goes straight back
        put_page_on_freelist(xi, mdl);
        ei = (struct netif_extra_info *)RING_GET_RESPONSE(&xi->rx, cons);
        xi->rxpi.extra_info = (BOOLEAN)!!(ei->flags & XEN_NETIF_EXTRA_FLAG_MORE);
        switch (ei->type)
        {
        case XEN_NETIF_EXTRA_TYPE_GSO:
          switch (ei->u.gso.type)
          {
          case XEN_NETIF_GSO_TYPE_TCPV4:
            xi->rxpi.mss = ei->u.gso.size; // triggers the split path in XenNet_MakePackets
            // TODO - put this assertion somewhere ASSERT(header_len + xi->rxpi.mss <= PAGE_SIZE); // this limits MTU to PAGE_SIZE - XN_HEADER_LEN
            break;
          default:
            KdPrint((__DRIVER_NAME " Unknown GSO type (%d) detected\n", ei->u.gso.type));
            break;
          }
          break;
        default:
          KdPrint((__DRIVER_NAME " Unknown extra info type (%d) detected\n", ei->type));
          break;
        }
      }
      else
      {
        rxrsp = RING_GET_RESPONSE(&xi->rx, cons);
        // status <= 0 is a backend error code; also reject responses
        // claiming data beyond the end of the granted page
        if (rxrsp->status <= 0
          || rxrsp->offset + rxrsp->status > PAGE_SIZE)
        {
          KdPrint((__DRIVER_NAME ": Error: rxrsp offset %d, size %d\n",
            rxrsp->offset, rxrsp->status));
          put_page_on_freelist(xi, mdl);
          continue;
        }
        ASSERT(rxrsp->id == id);
        if (!xi->rxpi.more_frags) // handling the packet's 1st buffer
        {
          if (rxrsp->flags & NETRXF_csum_blank)
            xi->rxpi.csum_calc_required = TRUE;
#if 0
          if (rxrsp->flags & (NETRXF_csum_blank|NETRXF_data_validated) && xi->config_csum)
          {
            //KdPrint((__DRIVER_NAME " RX csum blank = %d, validated = %d\n", !!(rxrsp->flags & NETRXF_csum_blank), !!(rxrsp->flags & NETRXF_data_validated)));
            if (rxrsp->flags & NETRXF_csum_blank)
              xi->rxpi.csum_calc_required = TRUE;
          }
#endif
        }
        NdisAdjustBufferLength(mdl, rxrsp->status); // positive status is the byte count
        xi->rxpi.mdls[xi->rxpi.mdl_count++] = mdl;
        xi->rxpi.extra_info = (BOOLEAN)!!(rxrsp->flags & NETRXF_extra_info);
        xi->rxpi.more_frags = (BOOLEAN)!!(rxrsp->flags & NETRXF_more_data);
        xi->rxpi.total_length = xi->rxpi.total_length + rxrsp->status;
      }

      /* Packet done, add it to the list */
      if (!xi->rxpi.more_frags && !xi->rxpi.extra_info)
      {
        XenNet_MakePackets(xi, packets, &packet_count);
        RtlZeroMemory(&xi->rxpi, sizeof(xi->rxpi)); // reset state for the next frame
      }
    }
    ASSERT(packet_count < NET_RX_RING_SIZE);
    xi->rx.rsp_cons = prod;
    RING_FINAL_CHECK_FOR_RESPONSES(&xi->rx, moretodo);
  } while (moretodo);

  /* Give netback more buffers */
  XenNet_RxBufferAlloc(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);

#if defined(XEN_PROFILE)
  ProfTime_RxBufferCheckTopHalf.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc2.QuadPart;
  tsc2 = KeQueryPerformanceCounter(&dummy);
#endif

  // indicate outside the rx lock
  if (packet_count > 0)
  {
    NdisMIndicateReceivePacket(xi->adapter_handle, packets, packet_count);
#if defined(XEN_PROFILE)
    ProfCount_CallsToIndicateReceive++;
#endif
  }

#if defined(XEN_PROFILE)
  ProfTime_RxBufferCheck.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfTime_RxBufferCheckBotHalf.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc2.QuadPart;
  ProfCount_RxBufferCheck++;
#endif

  return NDIS_STATUS_SUCCESS;
}
633 /* called at DISPATCH_LEVEL */
635 VOID
636 XenNet_ReturnPacket(
637 IN NDIS_HANDLE MiniportAdapterContext,
638 IN PNDIS_PACKET Packet
639 )
640 {
641 struct xennet_info *xi = MiniportAdapterContext;
642 PMDL mdl;
643 int cycles = 0;
644 #if defined(XEN_PROFILE)
645 LARGE_INTEGER tsc, dummy;
646 #endif
648 // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ " (%p)\n", Packet));
650 #if defined(XEN_PROFILE)
651 tsc = KeQueryPerformanceCounter(&dummy);
652 #endif
654 KeAcquireSpinLockAtDpcLevel(&xi->rx_lock);
656 NdisUnchainBufferAtBack(Packet, &mdl);
657 while (mdl)
658 {
659 ASSERT(cycles++ < 256);
660 NdisAdjustBufferLength(mdl, PAGE_SIZE);
661 put_page_on_freelist(xi, mdl);
662 NdisUnchainBufferAtBack(Packet, &mdl);
663 }
665 NdisFreePacket(Packet);
666 xi->rx_outstanding--;
668 KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);
670 // KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
672 #if defined(XEN_PROFILE)
673 ProfTime_ReturnPacket.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
674 ProfCount_ReturnPacket++;
675 #endif
676 }
678 /*
679 Free all Rx buffers (on halt, for example)
680 The ring must be stopped at this point.
681 */
682 static void
683 XenNet_RxBufferFree(struct xennet_info *xi)
684 {
685 int i;
686 PMDL mdl;
688 ASSERT(!xi->connected);
690 for (i = 0; i < NET_RX_RING_SIZE; i++)
691 {
692 if (!xi->rx_buffers[i])
693 continue;
695 mdl = xi->rx_buffers[i];
696 NdisAdjustBufferLength(mdl, PAGE_SIZE);
697 put_page_on_freelist(xi, mdl);
698 }
699 }
701 BOOLEAN
702 XenNet_RxInit(xennet_info_t *xi)
703 {
704 int i;
706 KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
708 xi->rx_mdl = AllocatePage();
709 xi->rx_pgs = MmGetMdlVirtualAddress(xi->rx_mdl);
710 SHARED_RING_INIT(xi->rx_pgs);
711 FRONT_RING_INIT(&xi->rx, xi->rx_pgs, PAGE_SIZE);
712 xi->rx_ring_ref = xi->XenInterface.GntTbl_GrantAccess(
713 xi->XenInterface.InterfaceHeader.Context, 0,
714 *MmGetMdlPfnArray(xi->rx_mdl), FALSE, 0);
715 xi->rx_id_free = NET_RX_RING_SIZE;
717 for (i = 0; i < NET_RX_RING_SIZE; i++)
718 {
719 xi->rx_buffers[i] = NULL;
720 }
722 xi->rx_outstanding = 0;
723 XenNet_RxBufferAlloc(xi);
725 KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
727 return TRUE;
728 }
/*
 * Tear down the rx side: return posted buffers to the free list, drain
 * the free list itself (revoking per-page grants), then revoke the ring
 * page's grant and free it.
 * NOTE(review): if GntTbl_EndAccess on the ring ref fails, rx_mdl is not
 * freed and rx_ring_ref keeps its old value — presumably deliberate
 * because the backend still holds the grant; confirm.
 */
BOOLEAN
XenNet_RxShutdown(xennet_info_t *xi)
{
  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));

  XenNet_RxBufferFree(xi);

  free_page_freelist(xi);

  /* free RX resources */
  if (xi->XenInterface.GntTbl_EndAccess(
    xi->XenInterface.InterfaceHeader.Context, xi->rx_ring_ref, 0))
  {
    xi->rx_ring_ref = GRANT_INVALID_REF;
    FreePages(xi->rx_mdl);
  }
  xi->rx_pgs = NULL;

  ASSERT(xi->rx_outstanding == 0); // all indicated packets must have been returned

  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));

  return TRUE;
}