win-pvdrivers

view xennet/xennet_tx.c @ 244:d31884ed02a9

tweaked a few things to make crashes go away
author James Harper <james.harper@bendigoit.com.au>
date Wed Apr 02 00:21:09 2008 +1100 (2008-04-02)
parents dce3943a11ec
children 1b1f26917b6f
line source
1 /*
2 PV Net Driver for Windows Xen HVM Domains
3 Copyright (C) 2007 James Harper
4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
21 #include "xennet.h"
23 #define FREELIST_ID_ERROR 0xFFFF
25 #ifdef XEN_PROFILE
26 #define PC_INC(var) var++
27 #else
28 #define PC_INC(var)
29 #endif
/* Number of free tx request ids.
 * NOTE(review): this returns only tx_id_free, while get_id_from_freelist
 * and get_no_id_from_freelist test (tx_id_free - tx_no_id_free) -- confirm
 * whether extra_info (no-id) slots should be subtracted here too. */
static ULONG
free_requests(struct xennet_info *xi)
{
  return xi->tx_id_free;
}
37 static USHORT
38 get_id_from_freelist(struct xennet_info *xi)
39 {
40 if (xi->tx_id_free - xi->tx_no_id_free == 0)
41 return FREELIST_ID_ERROR;
42 xi->tx_id_free--;
43 return xi->tx_id_list[xi->tx_id_free];
44 }
46 static USHORT
47 get_no_id_from_freelist(struct xennet_info *xi)
48 {
49 if (xi->tx_id_free - xi->tx_no_id_free == 0)
50 return FREELIST_ID_ERROR;
51 xi->tx_no_id_free--;
52 return 0;
53 }
55 static VOID
56 put_id_on_freelist(struct xennet_info *xi, USHORT id)
57 {
58 xi->tx_id_list[xi->tx_id_free] = id;
59 xi->tx_id_free++;
60 }
62 static VOID
63 put_no_id_on_freelist(struct xennet_info *xi)
64 {
65 xi->tx_no_id_free++;
66 }
68 static grant_ref_t
69 get_gref_from_freelist(struct xennet_info *xi)
70 {
71 if (xi->tx_gref_free == 0)
72 return 0;
73 xi->tx_gref_free--;
74 return xi->tx_gref_list[xi->tx_gref_free];
75 }
77 static VOID
78 put_gref_on_freelist(struct xennet_info *xi, grant_ref_t gref)
79 {
80 xi->tx_gref_list[xi->tx_gref_free] = gref;
81 xi->tx_gref_free++;
82 }
/* Byte-swap a 16-bit value (network <-> host order on little-endian).
 * The argument is fully parenthesized: the original expansion used bare
 * `x`, so a low-precedence argument (e.g. SWAP_USHORT(a ? b : c)) would
 * mis-expand under operator precedence. */
#define SWAP_USHORT(x) ((USHORT)(((((x) & 0xFF) << 8)|(((x) >> 8) & 0xFF))))
/*
 * Compute the IPv4 header checksum of an outgoing frame in place.
 * Windows assumes that if we can do large send offload then we can
 * do IP header csum offload, so we have to fake it!
 * mdl: first buffer of the packet -- assumed to contain the complete
 * ethernet header plus full IP header (see TODOs below; no length
 * validation is actually performed).  Non-IPv4 frames are left untouched.
 */
VOID
XenNet_SumIpHeader(
  PMDL mdl /* first buffer of the packet - containing the header */
)
{
  PVOID buffer = MmGetSystemAddressForMdlSafe(mdl, NormalPagePriority);
  PUSHORT ushorts = (PUSHORT)buffer;

  USHORT length_in_ushorts;
  USHORT i;
  ULONG csum = 0;

  ASSERT(buffer);
  /* ushorts[6] is the ethertype field (bytes 12-13 of the ethernet header) */
  switch (SWAP_USHORT(ushorts[6]))
  {
  case 0x0800: /* IPv4 */
    /* TODO: check if buffer is long enough to contain ethernet header + minimum ip header */
    ushorts = &ushorts[0x07]; /* step over the 14-byte ethernet header */
    /* IHL (low nibble of the first header byte) counts 32-bit words; *2 converts to USHORTs */
    length_in_ushorts = ((SWAP_USHORT(ushorts[0]) >> 8) & 0x0F) * 2;
    /* TODO: check if buffer is long enough to contain options too */
    break;
  default:
    return;
  }
  ushorts[5] = 0; /* zero the checksum field (header bytes 10-11) before summing */
  for (i = 0; i < length_in_ushorts; i++)
  {
    csum += SWAP_USHORT(ushorts[i]);
  }
  /* fold carries back into the low 16 bits (ones-complement sum, RFC 1071) */
  while (csum & 0xFFFF0000)
    csum = (csum & 0xFFFF) + (csum >> 16);
  ushorts[5] = SWAP_USHORT(~csum);
}
/* One physical-page fragment of an outgoing packet, produced by
 * XenNet_BuildPageList and consumed by XenNet_PutOnTxRing. */
typedef struct
{
  PFN_NUMBER pfn;  /* physical frame containing the fragment */
  USHORT offset;   /* byte offset of the fragment within the page */
  USHORT length;   /* fragment length in bytes (<= PAGE_SIZE - offset) */
} page_element_t;
/* Flatten an NDIS packet's MDL chain into a per-physical-page
 * (pfn, offset, length) element list for the tx ring.
 * On entry *num_elements is the capacity of elements[]; on exit it holds
 * the number of entries actually filled.  Capacity overflow is caught
 * only by ASSERT (checked builds). */
static VOID
XenNet_BuildPageList(PNDIS_PACKET packet, page_element_t *elements, PUSHORT num_elements)
{
  USHORT element_num = 0;
  UINT offset;
  PVOID addr;
  UINT remaining;
  ULONG pages;
  USHORT page;
  PMDL buffer;
  PPFN_NUMBER pfns;

  NdisQueryPacket(packet, NULL, NULL, &buffer, NULL);
//  sg_list = NDIS_PER_PACKET_INFO_FROM_PACKET(packet, ScatterGatherListPacketInfo);
  while (buffer != NULL)
  {
    /* addr is unused, but the call maps the MDL into system address
       space as a side effect -- NOTE(review): confirm that is the intent */
    addr = MmGetSystemAddressForMdlSafe(buffer, NormalPagePriority);
    offset = MmGetMdlByteOffset(buffer);
    remaining = MmGetMdlByteCount(buffer);
    pages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(buffer), remaining);
    pfns = MmGetMdlPfnArray(buffer);
    for (page = 0; page < pages; page++, element_num++)
    {
      ASSERT(element_num < *num_elements);
      elements[element_num].pfn = pfns[page];
      /* only the first page of each MDL starts at a non-zero offset */
      elements[element_num].offset = (USHORT)offset;
      elements[element_num].length = (USHORT)min(remaining, PAGE_SIZE - offset);
      offset = 0;
      remaining -= elements[element_num].length;
    }
    ASSERT(remaining == 0);
    NdisGetNextBuffer(buffer, &buffer);
  }
  *num_elements = element_num;
}
/* Place one page fragment on the tx ring: allocate an id and a grant ref,
 * grant the backend access to pfn, and fill in the next free request.
 * The caller must advance xi->tx.req_prod_pvt afterwards.
 * Called with tx_lock held.  Freelist exhaustion is only ASSERTed --
 * callers are expected to check free_requests() first. */
static struct netif_tx_request*
XenNet_PutOnTxRing(
  struct xennet_info *xi,
  PFN_NUMBER pfn,
  USHORT offset,
  USHORT len,
  uint16_t flags)
{
  struct netif_tx_request *tx;
  unsigned short id;

  id = get_id_from_freelist(xi);
  ASSERT(id != FREELIST_ID_ERROR);
  ASSERT(xi->tx_pkts[id] == NULL); /* id must not already carry a packet */
  tx = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);

  tx->gref = get_gref_from_freelist(xi);
  ASSERT(tx->gref != 0);
  ASSERT(xi->tx_grefs[id] == 0);
  xi->tx_grefs[id] = tx->gref; /* remembered so TxBufferGC can recycle it */

  /* grant the backend access to the fragment's page for the duration of the tx */
  xi->XenInterface.GntTbl_GrantAccess(
    xi->XenInterface.InterfaceHeader.Context, 0,
    pfn, FALSE, tx->gref);
  tx->id = id;
  tx->offset = (uint16_t)offset;
  tx->size = (uint16_t)len;
  tx->flags = flags;
  PC_INC(ProfCount_TxPacketsTotal);

  return tx;
}
/* Called at DISPATCH_LEVEL with tx_lock held */
/*
 * Send one NDIS_PACKET.  This may involve multiple entries on the TX ring:
 * one request per physical page, plus an optional extra_info slot when
 * large send offload (GSO) is in use.  Returns FALSE -- leaving the ring
 * untouched -- when there are not enough free request ids; the caller is
 * expected to retry the packet later.
 */
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet)
{
  struct netif_tx_request *tx = NULL;
  struct netif_extra_info *ei;
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  UINT total_packet_length;
  ULONG mss; // 0 if not using large send
  PMDL first_buffer;
  uint16_t flags = NETTXF_more_data;
  page_element_t elements[NET_TX_RING_SIZE];
  USHORT num_elements;
  USHORT element_num;

#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;

  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  NdisQueryPacket(packet, NULL, NULL, &first_buffer, &total_packet_length);

  num_elements = NET_TX_RING_SIZE;
  XenNet_BuildPageList(packet, elements, &num_elements);
  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));

  /* need one request per element plus one extra_info slot if doing LSO.
     NOTE(review): free_requests() counts only tx_id_free -- confirm the
     no-id (extra_info) slot accounting is covered by this check. */
  if (num_elements + !!mss > (int)free_requests(xi))
    return FALSE;

  csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
    packet, TcpIpChecksumPacketInfo);
  if (csum_info->Transmit.NdisPacketTcpChecksum
    || csum_info->Transmit.NdisPacketUdpChecksum)
  {
    /* ask the backend to fill in the transport checksum */
    flags |= NETTXF_csum_blank | NETTXF_data_validated;
    PC_INC(ProfCount_TxPacketsCsumOffload);
  }

  if (mss > 0)
  {
    flags |= NETTXF_extra_info;
    /* Windows leaves the IP header checksum blank for LSO; fake it here */
    XenNet_SumIpHeader(first_buffer);
    PC_INC(ProfCount_TxPacketsLargeOffload);
  }

  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */
  /* (A) */
  tx = XenNet_PutOnTxRing(xi, elements[0].pfn, elements[0].offset, elements[0].length, flags);
  tx->size = (uint16_t)total_packet_length; /* 1st req size always tot pkt len */
  xi->tx.req_prod_pvt++;

  /* (B) */
  if (mss > 0)
  {
    get_no_id_from_freelist(xi); /* extra_info consumes a slot but no id */
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT) mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;

    xi->tx.req_prod_pvt++;
  }

  /* (C) */
  for (element_num = 1; element_num < num_elements; element_num++)
  {
    //KdPrint((__DRIVER_NAME " i = %d\n", i));
    tx = XenNet_PutOnTxRing(xi, elements[element_num].pfn,
      elements[element_num].offset, elements[element_num].length,
      NETTXF_more_data);
    xi->tx.req_prod_pvt++;
  }

  /* only set the packet on the last buffer, clear more_data */
  ASSERT(tx);
  xi->tx_pkts[tx->id] = packet;
  tx->flags &= ~NETTXF_more_data;

  return TRUE;
}
295 /* Called at DISPATCH_LEVEL with tx_lock held */
297 static VOID
298 XenNet_SendQueuedPackets(struct xennet_info *xi)
299 {
300 PLIST_ENTRY entry;
301 PNDIS_PACKET packet;
302 int notify;
303 #if defined(XEN_PROFILE)
304 LARGE_INTEGER tsc, dummy;
305 #endif
307 int cycles = 0;
308 BOOLEAN success;
310 #if defined(XEN_PROFILE)
311 tsc = KeQueryPerformanceCounter(&dummy);
312 #endif
314 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
315 /* if empty, the above returns head*, not NULL */
316 while (entry != &xi->tx_waiting_pkt_list)
317 {
318 ASSERT(cycles++ < 65536);
319 //KdPrint((__DRIVER_NAME " Packet ready to send\n"));
320 packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
321 success = XenNet_HWSendPacket(xi, packet);
322 if (!success)
323 break;
324 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
325 }
327 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx, notify);
328 if (notify)
329 {
330 xi->XenInterface.EvtChn_Notify(xi->XenInterface.InterfaceHeader.Context,
331 xi->event_channel);
332 }
334 #if defined(XEN_PROFILE)
335 ProfTime_SendQueuedPackets.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
336 ProfCount_SendQueuedPackets++;
337 #endif
338 }
// Called at DISPATCH_LEVEL
/* Reap tx responses from the backend: recycle ids and grant refs, collect
 * the finished NDIS packets, then complete them to NDIS after dropping the
 * tx lock (NdisMSendComplete must not be called with a spin lock held). */
NDIS_STATUS
XenNet_TxBufferGC(struct xennet_info *xi)
{
  RING_IDX cons, prod;
  unsigned short id;
  PNDIS_PACKET packets[NET_TX_RING_SIZE];
  ULONG packet_count = 0;
  int moretodo;
  ULONG i;
  UINT total_packet_length;
  int cycles = 0;
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
#endif

  ASSERT(xi->connected);
  ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

//  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));

#if defined(XEN_PROFILE)
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  do {
    ASSERT(cycles++ < 65536);
    prod = xi->tx.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rp'. */

    for (cons = xi->tx.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;

      ASSERT(cycles++ < 65536);

      txrsp = RING_GET_RESPONSE(&xi->tx, cons);
      if (txrsp->status == NETIF_RSP_NULL)
      {
//        KdPrint((__DRIVER_NAME " NETIF_RSP_NULL\n"));
        put_no_id_on_freelist(xi);
        continue; // This would be the response to an extra_info packet
      }

      id = txrsp->id;
      packets[packet_count] = xi->tx_pkts[id];
      /* not every id carries a packet -- only the last fragment of each
         packet had tx_pkts[id] set in XenNet_HWSendPacket */
      if (packets[packet_count])
      {
        NdisQueryPacket(packets[packet_count], NULL, NULL, NULL, &total_packet_length);
        /* for LSO packets NDIS expects TcpLargeSendPacketInfo to be
           updated with the number of bytes sent */
        if (NDIS_PER_PACKET_INFO_FROM_PACKET(packets[packet_count], TcpLargeSendPacketInfo) != 0)
        {
          NDIS_PER_PACKET_INFO_FROM_PACKET(packets[packet_count], TcpLargeSendPacketInfo) = UlongToPtr(total_packet_length);
          //KdPrint((__DRIVER_NAME " Large Send Response = %d\n", NDIS_PER_PACKET_INFO_FROM_PACKET(packets[packet_count], TcpLargeSendPacketInfo)));
        }
        xi->tx_pkts[id] = NULL;
        packet_count++;
        xi->stat_tx_ok++;
      }
      /* recycle the grant ref and id for this fragment */
      put_gref_on_freelist(xi, xi->tx_grefs[id]);
      xi->tx_grefs[id] = 0;
      put_id_on_freelist(xi, id);
    }

    xi->tx.rsp_cons = prod;

    RING_FINAL_CHECK_FOR_RESPONSES(&xi->tx, moretodo);
  } while (moretodo);

  /* if queued packets, send them now */
  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  for (i = 0; i < packet_count; i++)
  {
    /* A miniport driver must release any spin lock that it is holding before
       calling NdisMSendComplete. */
    NdisMSendComplete(xi->adapter_handle, packets[i], NDIS_STATUS_SUCCESS);
    xi->tx_outstanding--;
  }

//  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));

#if defined(XEN_PROFILE)
  ProfTime_TxBufferGC.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_TxBufferGC++;
#endif

  return NDIS_STATUS_SUCCESS;
}
/* NDIS MiniportSendPackets entry point: queue each packet on the tx
 * waiting list (using the LIST_ENTRY carved out of MiniportReservedEx),
 * then try to push the queue onto the ring.  Acquires tx_lock itself. */
VOID
XenNet_SendPackets(
  IN NDIS_HANDLE MiniportAdapterContext,
  IN PPNDIS_PACKET PacketArray,
  IN UINT NumberOfPackets
  )
{
  struct xennet_info *xi = MiniportAdapterContext;
  PNDIS_PACKET packet;
  UINT i;
  PLIST_ENTRY entry;
  KIRQL OldIrql;
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
  KIRQL OldIrql2;
#endif

#if defined(XEN_PROFILE)
  KeRaiseIrql(DISPATCH_LEVEL, &OldIrql2);
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);

//  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
  for (i = 0; i < NumberOfPackets; i++)
  {
    packet = PacketArray[i];
    ASSERT(packet);
    /* clear the first slot of MiniportReservedEx; the queue LIST_ENTRY
       lives at offset sizeof(PVOID) within the reserved area */
    *(ULONG *)&packet->MiniportReservedEx = 0;
    entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
    InsertTailList(&xi->tx_waiting_pkt_list, entry);
    xi->tx_outstanding++;
#if defined(XEN_PROFILE)
    ProfCount_PacketsPerSendPackets++;
#endif
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

#if defined(XEN_PROFILE)
  ProfTime_SendPackets.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_SendPackets++;
  KeLowerIrql(OldIrql2);
#endif

#if defined(XEN_PROFILE)
  /* periodic dump of the profiling counters, every 1024 calls */
  if ((ProfCount_SendPackets & 1023) == 0)
  {
    KdPrint((__DRIVER_NAME " ***\n"));
    KdPrint((__DRIVER_NAME " RxBufferAlloc Count = %10d, Avg Time = %10ld\n", ProfCount_RxBufferAlloc, (ProfCount_RxBufferAlloc == 0)?0:(ProfTime_RxBufferAlloc.QuadPart / ProfCount_RxBufferAlloc)));
    KdPrint((__DRIVER_NAME " ReturnPacket Count = %10d, Avg Time = %10ld\n", ProfCount_ReturnPacket, (ProfCount_ReturnPacket == 0)?0:(ProfTime_ReturnPacket.QuadPart / ProfCount_ReturnPacket)));
    KdPrint((__DRIVER_NAME " RxBufferCheck Count = %10d, Avg Time = %10ld\n", ProfCount_RxBufferCheck, (ProfCount_RxBufferCheck == 0)?0:(ProfTime_RxBufferCheck.QuadPart / ProfCount_RxBufferCheck)));
    KdPrint((__DRIVER_NAME " RxBufferCheckTop Avg Time = %10ld\n", (ProfCount_RxBufferCheck == 0)?0:(ProfTime_RxBufferCheckTopHalf.QuadPart / ProfCount_RxBufferCheck)));
    KdPrint((__DRIVER_NAME " RxBufferCheckBot Avg Time = %10ld\n", (ProfCount_RxBufferCheck == 0)?0:(ProfTime_RxBufferCheckBotHalf.QuadPart / ProfCount_RxBufferCheck)));
    KdPrint((__DRIVER_NAME " Linearize Count = %10d, Avg Time = %10ld\n", ProfCount_Linearize, (ProfCount_Linearize == 0)?0:(ProfTime_Linearize.QuadPart / ProfCount_Linearize)));
    KdPrint((__DRIVER_NAME " SendPackets Count = %10d, Avg Time = %10ld\n", ProfCount_SendPackets, (ProfCount_SendPackets == 0)?0:(ProfTime_SendPackets.QuadPart / ProfCount_SendPackets)));
    KdPrint((__DRIVER_NAME " Packets per SendPackets = %10d\n", (ProfCount_SendPackets == 0)?0:(ProfCount_PacketsPerSendPackets / ProfCount_SendPackets)));
    KdPrint((__DRIVER_NAME " SendQueuedPackets Count = %10d, Avg Time = %10ld\n", ProfCount_SendQueuedPackets, (ProfCount_SendQueuedPackets == 0)?0:(ProfTime_SendQueuedPackets.QuadPart / ProfCount_SendQueuedPackets)));
    KdPrint((__DRIVER_NAME " TxBufferGC Count = %10d, Avg Time = %10ld\n", ProfCount_TxBufferGC, (ProfCount_TxBufferGC == 0)?0:(ProfTime_TxBufferGC.QuadPart / ProfCount_TxBufferGC)));
    KdPrint((__DRIVER_NAME " RxPackets Total = %10d, Csum Offload = %10d, Calls To Receive = %10d\n", ProfCount_RxPacketsTotal, ProfCount_RxPacketsCsumOffload, ProfCount_CallsToIndicateReceive));
    KdPrint((__DRIVER_NAME " TxPackets Total = %10d, Csum Offload = %10d, Large Offload = %10d\n", ProfCount_TxPacketsTotal, ProfCount_TxPacketsCsumOffload, ProfCount_TxPacketsLargeOffload));
  }
#endif
//  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
}
502 static void
503 XenNet_TxBufferFree(struct xennet_info *xi)
504 {
505 PLIST_ENTRY entry;
506 PNDIS_PACKET packet;
507 USHORT i;
508 grant_ref_t gref;
510 ASSERT(!xi->connected);
512 /* Free packets in tx queue */
513 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
514 while (entry != &xi->tx_waiting_pkt_list)
515 {
516 packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
517 NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
518 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
519 }
521 /* free sent-but-not-completed packets */
522 for (i = 0; i < NET_TX_RING_SIZE; i++)
523 {
524 packet = xi->tx_pkts[i];
525 if (packet != NULL)
526 NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
527 gref = xi->tx_grefs[i];
528 if (gref != 0)
529 xi->XenInterface.GntTbl_EndAccess(
530 xi->XenInterface.InterfaceHeader.Context, gref, TRUE);
531 }
532 }
534 BOOLEAN
535 XenNet_TxInit(xennet_info_t *xi)
536 {
537 USHORT i;
539 xi->tx_mdl = AllocatePage();
540 xi->tx_pgs = MmGetMdlVirtualAddress(xi->tx_mdl);
541 SHARED_RING_INIT(xi->tx_pgs);
542 FRONT_RING_INIT(&xi->tx, xi->tx_pgs, PAGE_SIZE);
543 xi->tx_ring_ref = xi->XenInterface.GntTbl_GrantAccess(
544 xi->XenInterface.InterfaceHeader.Context, 0,
545 *MmGetMdlPfnArray(xi->tx_mdl), FALSE, 0);
546 xi->tx_id_free = 0;
547 xi->tx_no_id_free = 0;
548 for (i = 0; i < NET_TX_RING_SIZE; i++)
549 {
550 xi->tx_pkts[i] = NULL;
551 put_id_on_freelist(xi, i);
552 }
553 xi->tx_gref_free = 0;
554 for (i = 0; i < NET_TX_RING_SIZE; i++)
555 {
556 xi->tx_grefs[i] = 0;
557 put_gref_on_freelist(xi, xi->XenInterface.GntTbl_GetRef(
558 xi->XenInterface.InterfaceHeader.Context));
559 }
560 return TRUE;
561 }
/* Release all tx resources: fail outstanding packets, revoke the ring
 * grant, free the ring page, and return the pre-allocated grant refs.
 * Always returns TRUE. */
BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi)
{
  ULONG i;

  XenNet_TxBufferFree(xi);

  /* free TX resources */
  if (xi->XenInterface.GntTbl_EndAccess(
    xi->XenInterface.InterfaceHeader.Context, xi->tx_ring_ref, 0))
  {
    xi->tx_ring_ref = GRANT_INVALID_REF;
    FreePages(xi->tx_mdl);
  }
  /* if EndAccess fails then tx/rx ring pages LEAKED -- it's not safe to reuse
     pages Dom0 still has access to */
  xi->tx_pgs = NULL;

  /* I think that NDIS takes care of this for us... */
  ASSERT(xi->tx_outstanding == 0);

  /* NOTE(review): this returns every slot of tx_gref_list regardless of
     tx_gref_free -- confirm all refs are guaranteed to be back on the
     freelist (i.e. no tx in flight) by the time we get here. */
  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    xi->XenInterface.GntTbl_PutRef(
      xi->XenInterface.InterfaceHeader.Context, xi->tx_gref_list[i]);
  }

  return TRUE;
}