win-pvdrivers

view xennet/xennet_rx.c @ 266:b88529df8b60

More wdm updates
author James Harper <james.harper@bendigoit.com.au>
date Wed May 07 10:47:03 2008 +1000 (2008-05-07)
parents 253ec5052cb4
children 2fc877b00cfd da9b1e17fbc0

/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "xennet.h"
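
/*
 XenNet_RxBufferAlloc tops the rx ring back up to rx_target outstanding
 requests: for each slot it takes a page from the rx freelist, records the MDL
 in rx_buffers[id] and posts a request carrying the page's grant reference to
 the shared ring. It stops early if free ids or pages run out, then pushes the
 requests and notifies the backend over the event channel if required.
*/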
// Called at DISPATCH_LEVEL with rx lock held
static NDIS_STATUS
XenNet_RxBufferAlloc(struct xennet_info *xi)
{
  unsigned short id;
  PMDL mdl;
  int i, batch_target, notify;
  RING_IDX req_prod = xi->rx.req_prod_pvt;
  netif_rx_request_t *req;
  int cycles = 0;
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
#endif

  //KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));

#if defined(XEN_PROFILE)
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  batch_target = xi->rx_target - (req_prod - xi->rx.rsp_cons);

  for (i = 0; i < batch_target; i++)
  {
    ASSERT(cycles++ < 256);
    if (xi->rx_id_free == 0)
    {
      KdPrint((__DRIVER_NAME " Added %d out of %d buffers to rx ring (ran out of id's)\n", i, batch_target));
      break;
    }
    mdl = XenFreelist_GetPage(&xi->rx_freelist);
    if (mdl == NULL)
    {
      KdPrint((__DRIVER_NAME " Added %d out of %d buffers to rx ring (no free pages)\n", i, batch_target));
      break;
    }
    xi->rx_id_free--;

    /* Give to netback */
    id = (USHORT)((req_prod + i) & (NET_RX_RING_SIZE - 1));
//    KdPrint((__DRIVER_NAME " id = %d\n", id));
    ASSERT(xi->rx_buffers[id] == NULL);
    xi->rx_buffers[id] = mdl;
    req = RING_GET_REQUEST(&xi->rx, req_prod + i);
    req->gref = get_grant_ref(mdl);
    req->id = id;
  }

  xi->rx.req_prod_pvt = req_prod + i;
  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->rx, notify);
  if (notify)
  {
    xi->XenInterface.EvtChn_Notify(xi->XenInterface.InterfaceHeader.Context,
      xi->event_channel);
  }

//  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));

#if defined(XEN_PROFILE)
  ProfTime_RxBufferAlloc.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_RxBufferAlloc++;
#endif

  return NDIS_STATUS_SUCCESS;
}

/*
 Windows appears to insist that the checksum on received packets is correct, and won't
 believe us when we lie about it, which happens when the packet is generated on the
 same bridge in Dom0. Doh!
 This is only for TCP and UDP packets. IP checksums appear to be correct anyway.
*/
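
/*
 XenNet_SumPacketData recomputes the TCP or UDP checksum in software: it zeroes
 the checksum field, accumulates the pseudo-header (source and destination IP
 addresses, protocol number and TCP/UDP length) and then the segment 16 bits at
 a time, walking the NDIS buffer chain and handling buffers that end on an odd
 byte. The carries are then folded back into the low 16 bits (an accumulator of
 0x2A4F3 folds to 0xA4F5, for example) and the one's complement of the result,
 in network byte order, is written back into the packet.
*/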
static VOID
XenNet_SumPacketData(
  packet_info_t *pi,
  PNDIS_PACKET packet
)
{
  USHORT i;
  PUCHAR buffer;
  PMDL mdl;
  UINT total_length;
  UINT buffer_length;
  USHORT buffer_offset;
  ULONG csum;
  PUSHORT csum_ptr;
  USHORT remaining;
  USHORT ip4_length;

//  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));

#if defined(XEN_PROFILE)
  ProfCount_RxPacketsCsumOffload++;
#endif

  NdisGetFirstBufferFromPacketSafe(packet, &mdl, &buffer, &buffer_length, &total_length, NormalPagePriority);
  ASSERT(mdl);

  ip4_length = GET_NET_USHORT(buffer[XN_HDR_SIZE + 2]);

  if ((USHORT)(ip4_length + XN_HDR_SIZE) != total_length)
  {
    KdPrint((__DRIVER_NAME " Size Mismatch %d (ip4_length + XN_HDR_SIZE) != %d (total_length)\n", ip4_length + XN_HDR_SIZE, total_length));
  }

  switch (pi->ip_proto)
  {
  case 6:
    csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + pi->ip4_header_length + 16];
    break;
  case 17:
    csum_ptr = (USHORT *)&buffer[XN_HDR_SIZE + pi->ip4_header_length + 6];
    break;
  default:
    KdPrint((__DRIVER_NAME " Don't know how to calc sum for IP Proto %d\n", pi->ip_proto));
    return;
  }

  *csum_ptr = 0;

  csum = 0;
  csum += GET_NET_USHORT(buffer[XN_HDR_SIZE + 12]) + GET_NET_USHORT(buffer[XN_HDR_SIZE + 14]); // src
  csum += GET_NET_USHORT(buffer[XN_HDR_SIZE + 16]) + GET_NET_USHORT(buffer[XN_HDR_SIZE + 18]); // dst
  csum += ((USHORT)buffer[XN_HDR_SIZE + 9]);

  remaining = ip4_length - pi->ip4_header_length;

  csum += remaining;

  for (buffer_offset = i = XN_HDR_SIZE + pi->ip4_header_length; i < total_length - 1; i += 2, buffer_offset += 2)
  {
    if (buffer_offset == buffer_length - 1) // deal with a buffer ending on an odd byte boundary
    {
      csum += (USHORT)buffer[buffer_offset] << 8;
      NdisGetNextBuffer(mdl, &mdl);
      if (mdl == NULL)
      {
        KdPrint((__DRIVER_NAME " Ran out of buffers\n"));
        return;
      }
      NdisQueryBufferSafe(mdl, &buffer, &buffer_length, NormalPagePriority);
      csum += ((USHORT)buffer[0]);
      buffer_offset = (USHORT)-1;
    }
    else
    {
      if (buffer_offset == buffer_length)
      {
//        KdPrint((__DRIVER_NAME " New buffer - aligned...\n"));
        NdisGetNextBuffer(mdl, &mdl);
        if (mdl == NULL)
        {
          KdPrint((__DRIVER_NAME " Ran out of buffers\n"));
          return;
        }
        NdisQueryBufferSafe(mdl, &buffer, &buffer_length, NormalPagePriority);
        buffer_offset = 0;
      }
      csum += GET_NET_USHORT(buffer[buffer_offset]);
    }
  }
  if (i != total_length) // last odd byte
  {
    csum += ((USHORT)buffer[buffer_offset] << 8);
  }
  while (csum & 0xFFFF0000)
    csum = (csum & 0xFFFF) + (csum >> 16);
  *csum_ptr = (USHORT)~GET_NET_USHORT(csum);

//  KdPrint((__DRIVER_NAME " csum = %04x\n", *csum_ptr));

//  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
}
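
/*
 The three helpers below keep a small cache of NDIS_PACKET descriptors in
 rx_packet_list so that descriptors can be reinitialised and reused for the
 next receive rather than freed and reallocated from the packet pool each time.
 packet_freelist_dispose releases anything still cached and is called from
 XenNet_RxShutdown.
*/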
static PNDIS_PACKET
get_packet_from_freelist(struct xennet_info *xi)
{
  NDIS_STATUS status;
  PNDIS_PACKET packet;

  if (!xi->rx_packet_free)
  {
    NdisAllocatePacket(&status, &packet, xi->packet_pool);
    ASSERT(status == NDIS_STATUS_SUCCESS);
    NDIS_SET_PACKET_HEADER_SIZE(packet, XN_HDR_SIZE);
  }
  else
  {
    xi->rx_packet_free--;
    packet = xi->rx_packet_list[xi->rx_packet_free];
  }

  return packet;
}

static VOID
put_packet_on_freelist(struct xennet_info *xi, PNDIS_PACKET packet)
{
  NdisReinitializePacket(packet);
  xi->rx_packet_list[xi->rx_packet_free] = packet;
  xi->rx_packet_free++;
}

static VOID
packet_freelist_dispose(struct xennet_info *xi)
{
  while (xi->rx_packet_free != 0)
  {
    xi->rx_packet_free--;
    NdisFreePacket(xi->rx_packet_list[xi->rx_packet_free]);
  }
}
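
/*
 XenNet_MakePacket builds one NDIS packet from the current packet_info. In the
 simple case it just chains the received MDLs onto a recycled packet
 descriptor. When split_required is set (a large GSO receive), it instead
 copies the saved Ethernet/IP/TCP header plus up to one MSS of payload into a
 fresh page, patches the IP total length and TCP sequence number for this
 segment, and recomputes the IP header checksum before chaining the new buffer.
*/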
static PNDIS_PACKET
XenNet_MakePacket(
  struct xennet_info *xi
)
{
  PNDIS_PACKET packet;
  PUCHAR in_buffer;
  PNDIS_BUFFER out_mdl;
  PUCHAR out_buffer;
  USHORT out_offset;
  USHORT out_remaining;
  USHORT length;
  USHORT new_ip4_length;
  //NDIS_STATUS status;
  USHORT i;

//  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));

  packet = get_packet_from_freelist(xi);
  xi->rx_outstanding++;

  if (!xi->rxpi.split_required)
  {
    for (i = 0; i < xi->rxpi.mdl_count; i++)
      NdisChainBufferAtBack(packet, xi->rxpi.mdls[i]);
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
  }
  else
  {
    out_mdl = XenFreelist_GetPage(&xi->rx_freelist);
    out_buffer = MmGetMdlVirtualAddress(out_mdl);
    out_offset = XN_HDR_SIZE + xi->rxpi.ip4_header_length + xi->rxpi.tcp_header_length;
    out_remaining = min(xi->rxpi.mss, xi->rxpi.tcp_remaining);
    NdisAdjustBufferLength(out_mdl, out_offset + out_remaining);
    memcpy(out_buffer, xi->rxpi.header, out_offset);
    new_ip4_length = out_remaining + xi->rxpi.ip4_header_length + xi->rxpi.tcp_header_length;
    SET_NET_USHORT(out_buffer[XN_HDR_SIZE + 2], new_ip4_length);
    SET_NET_ULONG(out_buffer[XN_HDR_SIZE + xi->rxpi.ip4_header_length + 4], xi->rxpi.tcp_seq);
    xi->rxpi.tcp_seq += out_remaining;
    xi->rxpi.tcp_remaining = xi->rxpi.tcp_remaining - out_remaining;
    do
    {
      ASSERT(xi->rxpi.curr_mdl < xi->rxpi.mdl_count);
      in_buffer = XenNet_GetData(&xi->rxpi, out_remaining, &length);
      memcpy(&out_buffer[out_offset], in_buffer, length);
      out_remaining = out_remaining - length;
      out_offset = out_offset + length;
    } while (out_remaining != 0); // && in_buffer != NULL);
    NdisChainBufferAtBack(packet, out_mdl);
    XenNet_SumIpHeader(out_buffer, xi->rxpi.ip4_header_length);
    NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
  }

//  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ " (%p)\n", packet));

  return packet;
}
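
/*
 XenNet_MakePackets turns the fragments collected in rxpi into one or more NDIS
 packets and appends them to the packets array. A TCP receive that needs a GSO
 split is carved into MSS-sized segments in the while loop below, each with its
 data checksum recalculated, and the source MDLs are then returned to the rx
 freelist. Anything else becomes a single packet, with the checksum only
 recalculated when netback flagged it as blank.
*/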
static VOID
XenNet_MakePackets(
  struct xennet_info *xi,
  PNDIS_PACKET *packets,
  PULONG packet_count_p
)
{
  USHORT i;

//  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "(packets = %p, packet_count = %d)\n", packets, *packet_count_p));

  XenNet_ParsePacketHeader(&xi->rxpi);
  switch (xi->rxpi.ip_proto)
  {
  case 6: // TCP
    if (xi->rxpi.split_required)
      break;
    // fallthrough
  case 17: // UDP
    ASSERT(*packet_count_p < NET_RX_RING_SIZE);
    packets[*packet_count_p] = XenNet_MakePacket(xi);
    if (xi->rxpi.csum_calc_required)
      XenNet_SumPacketData(&xi->rxpi, packets[*packet_count_p]);
    (*packet_count_p)++;
    return;
  default:
    ASSERT(*packet_count_p < NET_RX_RING_SIZE);
    packets[*packet_count_p] = XenNet_MakePacket(xi);
    (*packet_count_p)++;
    return;
  }
//  KdPrint((__DRIVER_NAME " splitting packet\n"));
  xi->rxpi.tcp_remaining = xi->rxpi.tcp_length;
  if (MmGetMdlByteCount(xi->rxpi.mdls[0]) > (ULONG)(XN_HDR_SIZE + xi->rxpi.ip4_header_length + xi->rxpi.tcp_header_length))
    xi->rxpi.curr_mdl_offset = XN_HDR_SIZE + xi->rxpi.ip4_header_length + xi->rxpi.tcp_header_length;
  else
    xi->rxpi.curr_mdl = 1;

  while (xi->rxpi.tcp_remaining)
  {
    ASSERT(*packet_count_p < NET_RX_RING_SIZE);
    packets[*packet_count_p] = XenNet_MakePacket(xi);
    XenNet_SumPacketData(&xi->rxpi, packets[*packet_count_p]);
    (*packet_count_p)++;
  }

  ASSERT(xi->rxpi.curr_mdl == xi->rxpi.mdl_count);
  // TODO: restore psh status to last packet
  for (i = 0; i < xi->rxpi.mdl_count; i++)
  {
    NdisAdjustBufferLength(xi->rxpi.mdls[i], PAGE_SIZE);
    XenFreelist_PutPage(&xi->rx_freelist, xi->rxpi.mdls[i]);
  }
//  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ " (split)\n"));
}

#define MAXIMUM_PACKETS_PER_INTERRUPT 128
#define MAXIMUM_PACKETS_PER_INDICATE 32
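
/*
 XenNet_RxBufferCheck drains responses from the shared rx ring, pairing each
 one with the buffer that was posted under the same id. Ordinary responses
 contribute an MDL to the packet currently being assembled in rxpi, while
 extra-info responses carry the GSO MSS. Completed packets are built with
 XenNet_MakePackets and indicated to NDIS in batches (the rx lock is dropped
 around NdisMIndicateReceivePacket), and the ring is replenished via
 XenNet_RxBufferAlloc.
*/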
// Called at DISPATCH_LEVEL
NDIS_STATUS
XenNet_RxBufferCheck(struct xennet_info *xi)
{
  RING_IDX cons, prod;
  /* the highest number of packets we could receive is (65535 - header) / mss;
     for a low mss this could be even higher than NET_RX_RING_SIZE...
     what can we do? */
  PNDIS_PACKET packets[NET_RX_RING_SIZE];
  ULONG packet_count = 0;
  ULONG total_packets = 0;
  PMDL mdl;
  int moretodo;
  struct netif_rx_response *rxrsp = NULL;
  struct netif_extra_info *ei;
  USHORT id;
  int cycles = 0;
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
#endif

//  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));

#if defined(XEN_PROFILE)
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  ASSERT(xi->connected);

  KeAcquireSpinLockAtDpcLevel(&xi->rx_lock);

  do {
    ASSERT(cycles++ < 256);
    prod = xi->rx.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'prod'. */

    for (cons = xi->rx.rsp_cons; cons != prod && packet_count < MAXIMUM_PACKETS_PER_INDICATE; cons++)
    {
      ASSERT(cycles++ < 256);
      id = (USHORT)(cons & (NET_RX_RING_SIZE - 1));
      ASSERT(xi->rx_buffers[id]);
      mdl = xi->rx_buffers[id];
      xi->rx_buffers[id] = NULL;
      xi->rx_id_free++;
      if (xi->rxpi.extra_info)
      {
        XenFreelist_PutPage(&xi->rx_freelist, mdl);
        ei = (struct netif_extra_info *)RING_GET_RESPONSE(&xi->rx, cons);
        xi->rxpi.extra_info = (BOOLEAN)!!(ei->flags & XEN_NETIF_EXTRA_FLAG_MORE);
        switch (ei->type)
        {
        case XEN_NETIF_EXTRA_TYPE_GSO:
          switch (ei->u.gso.type)
          {
          case XEN_NETIF_GSO_TYPE_TCPV4:
            xi->rxpi.mss = ei->u.gso.size;
            // TODO - put this assertion somewhere: ASSERT(header_len + xi->rxpi.mss <= PAGE_SIZE); // this limits MTU to PAGE_SIZE - XN_HEADER_LEN
            break;
          default:
            KdPrint((__DRIVER_NAME " Unknown GSO type (%d) detected\n", ei->u.gso.type));
            break;
          }
          break;
        default:
          KdPrint((__DRIVER_NAME " Unknown extra info type (%d) detected\n", ei->type));
          break;
        }
      }
      else
      {
        rxrsp = RING_GET_RESPONSE(&xi->rx, cons);
        if (rxrsp->status <= 0
          || rxrsp->offset + rxrsp->status > PAGE_SIZE)
        {
          KdPrint((__DRIVER_NAME ": Error: rxrsp offset %d, size %d\n",
            rxrsp->offset, rxrsp->status));
          ASSERT(!xi->rxpi.extra_info);
          XenFreelist_PutPage(&xi->rx_freelist, mdl);
          continue;
        }
        ASSERT(rxrsp->id == id);
        if (!xi->rxpi.more_frags) // handling the packet's 1st buffer
        {
          if (rxrsp->flags & NETRXF_csum_blank)
            xi->rxpi.csum_calc_required = TRUE;
        }
        NdisAdjustBufferLength(mdl, rxrsp->status);
        xi->rxpi.mdls[xi->rxpi.mdl_count++] = mdl;
        xi->rxpi.extra_info = (BOOLEAN)!!(rxrsp->flags & NETRXF_extra_info);
        xi->rxpi.more_frags = (BOOLEAN)!!(rxrsp->flags & NETRXF_more_data);
        xi->rxpi.total_length = xi->rxpi.total_length + rxrsp->status;
      }

      /* Packet done, add it to the list */
      if (!xi->rxpi.more_frags && !xi->rxpi.extra_info)
      {
        /* we should probably check here and make sure that we have enough
           space for these packets, and if we don't, defer MakePackets until
           we have Indicated the current packets... */
        XenNet_MakePackets(xi, packets, &packet_count);
        RtlZeroMemory(&xi->rxpi, sizeof(xi->rxpi));
      }
    }
    xi->rx.rsp_cons = cons;

    XenNet_RxBufferAlloc(xi);

    if (packet_count > 0)
    {
      KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);
      NdisMIndicateReceivePacket(xi->adapter_handle, packets, packet_count);
#if defined(XEN_PROFILE)
      ProfCount_CallsToIndicateReceive++;
#endif
      KeAcquireSpinLockAtDpcLevel(&xi->rx_lock);
      total_packets += packet_count;
      packet_count = 0;
    }

    RING_FINAL_CHECK_FOR_RESPONSES(&xi->rx, moretodo);
  } while (moretodo && total_packets < MAXIMUM_PACKETS_PER_INTERRUPT);

  /* Give netback more buffers */
  XenNet_RxBufferAlloc(xi);

  if (xi->rxpi.more_frags || xi->rxpi.extra_info)
    KdPrint(("Partial receive (more_frags = %d, extra_info = %d, total_length = %d, mdl_count = %d)\n", xi->rxpi.more_frags, xi->rxpi.extra_info, xi->rxpi.total_length, xi->rxpi.mdl_count));

  KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);

#if defined(XEN_PROFILE)
  ProfTime_RxBufferCheck.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_RxBufferCheck++;
#endif

  return NDIS_STATUS_SUCCESS;
}
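
/*
 XenNet_ReturnPacket runs when NDIS hands back a packet indicated earlier: each
 chained buffer is restored to PAGE_SIZE and returned to the rx freelist, and
 the packet descriptor goes back on the packet freelist.
*/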
/* called at DISPATCH_LEVEL */

VOID
XenNet_ReturnPacket(
  IN NDIS_HANDLE MiniportAdapterContext,
  IN PNDIS_PACKET Packet
)
{
  struct xennet_info *xi = MiniportAdapterContext;
  PMDL mdl;
  int cycles = 0;
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
#endif

//  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ " (%p)\n", Packet));

#if defined(XEN_PROFILE)
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  KeAcquireSpinLockAtDpcLevel(&xi->rx_lock);

  NdisUnchainBufferAtBack(Packet, &mdl);
  while (mdl)
  {
    ASSERT(cycles++ < 256);
    NdisAdjustBufferLength(mdl, PAGE_SIZE);
    XenFreelist_PutPage(&xi->rx_freelist, mdl);
    NdisUnchainBufferAtBack(Packet, &mdl);
  }

  put_packet_on_freelist(xi, Packet);
  xi->rx_outstanding--;

  KeReleaseSpinLockFromDpcLevel(&xi->rx_lock);

//  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));

#if defined(XEN_PROFILE)
  ProfTime_ReturnPacket.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_ReturnPacket++;
#endif
}

/*
 Free all Rx buffers (on halt, for example)
 The ring must be stopped at this point.
*/

static void
XenNet_RxBufferFree(struct xennet_info *xi)
{
  int i;
  PMDL mdl;

  ASSERT(!xi->connected);

  for (i = 0; i < NET_RX_RING_SIZE; i++)
  {
    if (!xi->rx_buffers[i])
      continue;

    mdl = xi->rx_buffers[i];
    NdisAdjustBufferLength(mdl, PAGE_SIZE);
    XenFreelist_PutPage(&xi->rx_freelist, mdl);
  }
}
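
/*
 XenNet_RxInit allocates and grants the shared ring page, initialises the front
 ring, clears the rx_buffers table, sets up the rx page freelist and posts the
 initial batch of receive buffers.
*/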
BOOLEAN
XenNet_RxInit(xennet_info_t *xi)
{
  int i;

  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));

  xi->rx_mdl = AllocatePage();
  xi->rx_pgs = MmGetMdlVirtualAddress(xi->rx_mdl);
  SHARED_RING_INIT(xi->rx_pgs);
  FRONT_RING_INIT(&xi->rx, xi->rx_pgs, PAGE_SIZE);
  xi->rx_ring_ref = xi->XenInterface.GntTbl_GrantAccess(
    xi->XenInterface.InterfaceHeader.Context, 0,
    *MmGetMdlPfnArray(xi->rx_mdl), FALSE, 0);
  xi->rx_id_free = NET_RX_RING_SIZE;

  for (i = 0; i < NET_RX_RING_SIZE; i++)
  {
    xi->rx_buffers[i] = NULL;
  }

  xi->rx_outstanding = 0;

  XenFreelist_Init(xi, &xi->rx_freelist, &xi->rx_lock);

  XenNet_RxBufferAlloc(xi);

  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));

  return TRUE;
}
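
/*
 XenNet_RxShutdown undoes XenNet_RxInit: buffers still posted to the ring are
 reclaimed, the page and packet freelists are disposed of, the grant for the
 ring page is ended and the page itself is freed.
*/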
BOOLEAN
XenNet_RxShutdown(xennet_info_t *xi)
{
  KIRQL OldIrql;

  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));

  KeAcquireSpinLock(&xi->rx_lock, &OldIrql);

  XenNet_RxBufferFree(xi);

  XenFreelist_Dispose(&xi->rx_freelist);

  packet_freelist_dispose(xi);

  /* free RX resources */
  ASSERT(xi->XenInterface.GntTbl_EndAccess(
    xi->XenInterface.InterfaceHeader.Context, xi->rx_ring_ref, 0));
  FreePages(xi->rx_mdl);
  xi->rx_pgs = NULL;

  ASSERT(xi->rx_outstanding == 0);

  KeReleaseSpinLock(&xi->rx_lock, OldIrql);

  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));

  return TRUE;
}