win-pvdrivers

view xennet/xennet_tx.c @ 214:b400fd002071

xennet: tx_outstanding inc/dec appear to have been backwards
author Andy Grover <andy.grover@oracle.com>
date Thu Mar 20 16:39:32 2008 -0700 (2008-03-20)
parents 45fdf0d55f31
children e12cad680f3c
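In this revision the counter matches the commit message: xi->tx_outstanding is incremented in XenNet_SendPackets as each packet is queued, and decremented in XenNet_TxBufferGC as each transmit response is reaped.
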
line source
/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "xennet.h"

static USHORT
get_id_from_freelist(struct xennet_info *xi)
{
  if (xi->tx_id_free - xi->tx_no_id_free == 0)
    return 0xFFFF;
  xi->tx_id_free--;
  return xi->tx_id_list[xi->tx_id_free];
}

static USHORT
get_no_id_from_freelist(struct xennet_info *xi)
{
  if (xi->tx_id_free - xi->tx_no_id_free == 0)
    return 0xFFFF;
  xi->tx_no_id_free--;
  return 0;
}

static VOID
put_id_on_freelist(struct xennet_info *xi, USHORT id)
{
  xi->tx_id_list[xi->tx_id_free] = id;
  xi->tx_id_free++;
}

static VOID
put_no_id_on_freelist(struct xennet_info *xi)
{
  xi->tx_no_id_free++;
}

static grant_ref_t
get_gref_from_freelist(struct xennet_info *xi)
{
  if (xi->tx_gref_free == 0)
    return 0;
  xi->tx_gref_free--;
  return xi->tx_gref_list[xi->tx_gref_free];
}

static VOID
put_gref_on_freelist(struct xennet_info *xi, grant_ref_t gref)
{
  xi->tx_gref_list[xi->tx_gref_free] = gref;
  xi->tx_gref_free++;
}

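/*
 * Freelist bookkeeping: ring requests that carry data take a real id from
 * tx_id_list (get_id_from_freelist / put_id_on_freelist); extra_info
 * requests occupy a ring slot but have no id of their own, so they are
 * only tallied through tx_no_id_free (get_no_id_from_freelist /
 * put_no_id_on_freelist) and the backend answers them with NETIF_RSP_NULL.
 * Grant references for the data pages are recycled through tx_gref_list
 * in the same way.
 */
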
#define SWAP_USHORT(x) (USHORT)((((x & 0xFF) << 8)|((x >> 8) & 0xFF)))

/*
 * Windows assumes that if we can do large send offload then we can
 * do IP header csum offload, so we have to fake it!
 */
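/*
 * Layout assumed below: the ethertype sits at byte offset 12 of the
 * ethernet header (ushorts[6]), the IP header starts at byte offset 14
 * (&ushorts[0x07]), the IHL field is the low nibble of the first IP byte
 * (counted in 32-bit words, hence the * 2 to get 16-bit words), and the
 * IP header checksum is at byte offset 10 of the IP header (ushorts[5]).
 */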
VOID
XenNet_SumHeader(
  PMDL mdl /* first buffer of the packet - containing the header */
)
{
  PVOID buffer = MmGetSystemAddressForMdlSafe(mdl, NormalPagePriority);
  PUSHORT ushorts = (PUSHORT)buffer;

  USHORT length_in_ushorts;
  USHORT i;
  ULONG csum = 0;

  ASSERT(buffer);
  switch (SWAP_USHORT(ushorts[6]))
  {
  case 0x0800:
    /* check if buffer is long enough to contain ethernet header + minimum ip header */
    ushorts = &ushorts[0x07];
    length_in_ushorts = ((SWAP_USHORT(ushorts[0]) >> 8) & 0x0F) * 2;
    /* check if buffer is long enough to contain options too */
    break;
  default:
    return;
  }
  ushorts[5] = 0;
  for (i = 0; i < length_in_ushorts; i++)
  {
    csum += SWAP_USHORT(ushorts[i]);
  }
  while (csum & 0xFFFF0000)
    csum = (csum & 0xFFFF) + (csum >> 16);
  ushorts[5] = SWAP_USHORT(~csum);
}

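/*
 * Purely illustrative sketch (not part of this file): the same RFC 1071
 * style ones-complement sum that XenNet_SumHeader performs, written as a
 * hypothetical standalone helper over a caller-supplied buffer whose
 * checksum field has already been zeroed. Kept under #if 0 so it is
 * never compiled.
 */
#if 0
static USHORT
example_ip_header_csum(PUSHORT words, USHORT length_in_ushorts)
{
  ULONG csum = 0;
  USHORT i;

  for (i = 0; i < length_in_ushorts; i++)
    csum += SWAP_USHORT(words[i]);  /* header words are big-endian on the wire */
  while (csum & 0xFFFF0000)         /* fold the carries back into 16 bits */
    csum = (csum & 0xFFFF) + (csum >> 16);
  return SWAP_USHORT(~csum);        /* complement and return in wire order */
}
#endif
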
/* Called at DISPATCH_LEVEL with tx_lock held */
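/*
 * Walks tx_waiting_pkt_list and turns each packet's scatter/gather list
 * into tx ring requests, splitting any sg element that crosses a page
 * boundary. For a large-send (GSO) packet the slot immediately after the
 * first data request carries the netif_extra_info describing the MSS.
 * The ULONG at the start of MiniportReservedEx counts how many ring slots
 * this packet has already consumed, so a packet requeued when the ring
 * fills up resumes where it left off instead of emitting duplicates.
 */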
static VOID
XenNet_SendQueuedPackets(struct xennet_info *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  struct netif_tx_request *tx;
  struct netif_extra_info *ei;
  unsigned short id;
  int notify;
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
#endif
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;

  ULONG i;
  PSCATTER_GATHER_LIST sg_list;
  UINT total_packet_length;
  USHORT remaining;
  USHORT offset;
  USHORT length;
  ULONGLONG curr_addr;
  ULONG sg_num;
  ULONG pfn;
  ULONG mss;
  PMDL first_buffer;
  int cycles = 0;

#if defined(XEN_PROFILE)
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  /* if the list is empty, RemoveHeadList returns a pointer to the list head itself, not NULL */
  while (entry != &xi->tx_waiting_pkt_list)
  {
    ASSERT(cycles++ < 256);
    //KdPrint((__DRIVER_NAME " Packet ready to send\n"));
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    NdisQueryPacket(packet, NULL, NULL, &first_buffer, &total_packet_length);
    sg_list = NDIS_PER_PACKET_INFO_FROM_PACKET(packet, ScatterGatherListPacketInfo);
/*
    for (i = 0; i < sg_list->NumberOfElements; i++)
    {
      KdPrint((__DRIVER_NAME " sg entry %d - start = %08x, length = %d\n", i, sg_list->Elements[i].Address.LowPart, sg_list->Elements[i].Length));
    }
*/
    i = 0;
    sg_num = 0;
    remaining = 0;
    curr_addr = 0;
    id = 0;
    mss = 0;
    offset = 0;
    while (sg_num < sg_list->NumberOfElements || remaining || (i == 1 && mss))
    {
      //KdPrint((__DRIVER_NAME " i = %d\n", i));
      ASSERT(cycles++ < 256);
      if (i == 1 && mss)
      {
        //KdPrint((__DRIVER_NAME " Start of loop - Large Send...\n"));
        length = 0;
      }
      else if (remaining == 0)
      {
        mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));
        if (total_packet_length <= mss)
          mss = 0;
        //if (mss)
        //KdPrint((__DRIVER_NAME " Start of loop - First Frag in sg...\n"));
        curr_addr = sg_list->Elements[sg_num].Address.QuadPart;
        offset = (USHORT)(sg_list->Elements[sg_num].Address.QuadPart & (PAGE_SIZE - 1));
        remaining = (USHORT)sg_list->Elements[sg_num].Length;
        length = min(remaining, PAGE_SIZE - offset);
        //if (mss)
        //KdPrint((__DRIVER_NAME " sg entry %d - start = %08x, length = %d\n", sg_num, (ULONG)curr_addr, length));
        sg_num++;
      }
      else
      {
        //if (mss)
        //KdPrint((__DRIVER_NAME " Start of loop - Subsequent Frag in sg...\n"));
        offset = 0;
        length = min(remaining, PAGE_SIZE);
        //if (mss)
        //KdPrint((__DRIVER_NAME " sg entry %d - start = %08x, length = %d\n", sg_num, (ULONG)curr_addr, length));
      }
      remaining = remaining - length;
      pfn = (ULONG)(curr_addr >> PAGE_SHIFT);
      curr_addr += length;

      if (i++ < *(ULONG *)&packet->MiniportReservedEx)
        continue;
      if (length > 0)
      {
        id = get_id_from_freelist(xi);
        if (id == 0xFFFF)
        {
          KdPrint((__DRIVER_NAME " Out of space...\n"));
          /* whups, out of space on the ring. requeue and get out */
          InsertHeadList(&xi->tx_waiting_pkt_list, entry);
          break;
        }
        ASSERT(xi->tx_pkts[id] == NULL);
        (*(ULONG *)&packet->MiniportReservedEx)++;
        tx = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);

        tx->gref = get_gref_from_freelist(xi);
        ASSERT(tx->gref != 0);
        ASSERT(xi->tx_grefs[id] == 0);
        xi->tx_grefs[id] = tx->gref;

        xi->XenInterface.GntTbl_GrantAccess(
          xi->XenInterface.InterfaceHeader.Context, 0,
          pfn, FALSE, tx->gref);
        tx->id = id;
        tx->offset = offset;
        tx->flags = 0;
        if (i == 1) // we have already incremented i!!!
        {
          //if (mss)
          //KdPrint((__DRIVER_NAME " First Frag in packet...\n"));
          tx->size = (USHORT)total_packet_length;
#if defined(XEN_PROFILE)
          ProfCount_TxPacketsTotal++;
#endif
          csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpIpChecksumPacketInfo);
          if (csum_info->Transmit.NdisPacketTcpChecksum || csum_info->Transmit.NdisPacketUdpChecksum)
          {
            tx->flags |= NETTXF_csum_blank|NETTXF_data_validated;
#if defined(XEN_PROFILE)
            ProfCount_TxPacketsCsumOffload++;
#endif
          }
          if (mss)
          {
            XenNet_SumHeader(first_buffer);
            //KdPrint((__DRIVER_NAME " Large Send Offload - mss = %d, length = %d\n", mss, total_packet_length));
            tx->flags |= NETTXF_extra_info|NETTXF_csum_blank|NETTXF_data_validated;
#if defined(XEN_PROFILE)
            ProfCount_TxPacketsLargeOffload++;
#endif
          }
        }
        else
        {
          //if (mss)
          //KdPrint((__DRIVER_NAME " Subsequent Frag in packet...\n"));
          tx->size = length;
        }
        if (sg_num == sg_list->NumberOfElements && remaining == 0)
        {
          //if (mss)
          //KdPrint((__DRIVER_NAME " No more frags\n"));
          xi->tx_pkts[id] = packet; /* only set the packet on the last buffer */
        }
        else
        {
          //if (mss)
          //KdPrint((__DRIVER_NAME " More frags\n"));
          tx->flags |= NETTXF_more_data;
        }
      }
      else
      {
        id = get_no_id_from_freelist(xi);
        if (id == 0xFFFF)
        {
          KdPrint((__DRIVER_NAME " Out of space...\n"));
          /* whups, out of space on the ring. requeue and get out */
          InsertHeadList(&xi->tx_waiting_pkt_list, entry);
          break;
        }
        //if (mss)
        //KdPrint((__DRIVER_NAME " Extra Info...\n"));
        (*(ULONG *)&packet->MiniportReservedEx)++;
        ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
        ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
        ei->flags = 0;
        ei->u.gso.size = (USHORT)mss;
        ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
        ei->u.gso.pad = 0;
        ei->u.gso.features = 0;
      }
      xi->tx.req_prod_pvt++;
    }
    if (id == 0xFFFF)
      break;
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx, notify);
  if (notify)
  {
    xi->XenInterface.EvtChn_Notify(xi->XenInterface.InterfaceHeader.Context,
      xi->event_channel);
  }

#if defined(XEN_PROFILE)
  ProfTime_SendQueuedPackets.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_SendQueuedPackets++;
#endif
}

// Called at DISPATCH_LEVEL
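/*
 * Reaps transmit responses: for every response between rsp_cons and
 * rsp_prod, a NETIF_RSP_NULL status just returns an extra_info slot to
 * the "no id" pool; otherwise the grant reference and id are recycled and
 * the completed NDIS packet is remembered so it can be completed with
 * NdisMSendComplete once the tx_lock has been dropped.
 */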
NDIS_STATUS
XenNet_TxBufferGC(struct xennet_info *xi)
{
  RING_IDX cons, prod;
  unsigned short id;
  PNDIS_PACKET packets[NET_TX_RING_SIZE];
  ULONG packet_count = 0;
  int moretodo;
  ULONG i;
  UINT total_packet_length;
  int cycles = 0;
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
#endif

  ASSERT(xi->connected);
  ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

  // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));

#if defined(XEN_PROFILE)
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  do {
    ASSERT(cycles++ < 256);
    prod = xi->tx.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'prod'. */

    for (cons = xi->tx.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;

      ASSERT(cycles++ < 256);

      txrsp = RING_GET_RESPONSE(&xi->tx, cons);
      if (txrsp->status == NETIF_RSP_NULL)
      {
        // KdPrint((__DRIVER_NAME " NETIF_RSP_NULL\n"));
        put_no_id_on_freelist(xi);
        continue; // This would be the response to an extra_info packet
      }

      id = txrsp->id;
      packets[packet_count] = xi->tx_pkts[id];
      if (packets[packet_count])
      {
        NdisQueryPacket(packets[packet_count], NULL, NULL, NULL, &total_packet_length);
        if (NDIS_PER_PACKET_INFO_FROM_PACKET(packets[packet_count], TcpLargeSendPacketInfo) != 0)
        {
          NDIS_PER_PACKET_INFO_FROM_PACKET(packets[packet_count], TcpLargeSendPacketInfo) = UlongToPtr(total_packet_length);
          //KdPrint((__DRIVER_NAME " Large Send Response = %d\n", NDIS_PER_PACKET_INFO_FROM_PACKET(packets[packet_count], TcpLargeSendPacketInfo)));
        }
        xi->tx_pkts[id] = NULL;
        packet_count++;
        xi->stat_tx_ok++;
      }
      put_gref_on_freelist(xi, xi->tx_grefs[id]);
      xi->tx_grefs[id] = 0;
      put_id_on_freelist(xi, id);
      xi->tx_outstanding--;
    }

    xi->tx.rsp_cons = prod;

    RING_FINAL_CHECK_FOR_RESPONSES(&xi->tx, moretodo);
  } while (moretodo);

  /* if queued packets, send them now */
  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  for (i = 0; i < packet_count; i++)
  {
    /* A miniport driver must release any spin lock that it is holding before
       calling NdisMSendComplete. */
    NdisMSendComplete(xi->adapter_handle, packets[i], NDIS_STATUS_SUCCESS);
  }

  // KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));

#if defined(XEN_PROFILE)
  ProfTime_TxBufferGC.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_TxBufferGC++;
#endif

  return NDIS_STATUS_SUCCESS;
}

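/*
 * NDIS SendPackets handler: each packet is appended to
 * tx_waiting_pkt_list and counted in tx_outstanding, then
 * XenNet_SendQueuedPackets pushes as much of the queue as the ring will
 * take. The first ULONG of MiniportReservedEx is zeroed here so a queued
 * packet starts with no ring slots consumed; the LIST_ENTRY used for
 * queueing lives at MiniportReservedEx[sizeof(PVOID)].
 */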
VOID
XenNet_SendPackets(
  IN NDIS_HANDLE MiniportAdapterContext,
  IN PPNDIS_PACKET PacketArray,
  IN UINT NumberOfPackets
  )
{
  struct xennet_info *xi = MiniportAdapterContext;
  PNDIS_PACKET packet;
  UINT i;
  PLIST_ENTRY entry;
  KIRQL OldIrql;
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
  KIRQL OldIrql2;
#endif

#if defined(XEN_PROFILE)
  KeRaiseIrql(DISPATCH_LEVEL, &OldIrql2);
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);

  // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
  for (i = 0; i < NumberOfPackets; i++)
  {
    packet = PacketArray[i];
    ASSERT(packet);
    *(ULONG *)&packet->MiniportReservedEx = 0;
    entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
    InsertTailList(&xi->tx_waiting_pkt_list, entry);
    xi->tx_outstanding++;
#if defined(XEN_PROFILE)
    ProfCount_PacketsPerSendPackets++;
#endif
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

#if defined(XEN_PROFILE)
  ProfTime_SendPackets.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_SendPackets++;
  KeLowerIrql(OldIrql2);
#endif

#if defined(XEN_PROFILE)
  if ((ProfCount_SendPackets & 1023) == 0)
  {
    KdPrint((__DRIVER_NAME " ***\n"));
    KdPrint((__DRIVER_NAME " RxBufferAlloc Count = %10d, Avg Time = %10ld\n", ProfCount_RxBufferAlloc, (ProfCount_RxBufferAlloc == 0)?0:(ProfTime_RxBufferAlloc.QuadPart / ProfCount_RxBufferAlloc)));
    KdPrint((__DRIVER_NAME " ReturnPacket Count = %10d, Avg Time = %10ld\n", ProfCount_ReturnPacket, (ProfCount_ReturnPacket == 0)?0:(ProfTime_ReturnPacket.QuadPart / ProfCount_ReturnPacket)));
    KdPrint((__DRIVER_NAME " RxBufferCheck Count = %10d, Avg Time = %10ld\n", ProfCount_RxBufferCheck, (ProfCount_RxBufferCheck == 0)?0:(ProfTime_RxBufferCheck.QuadPart / ProfCount_RxBufferCheck)));
    KdPrint((__DRIVER_NAME " RxBufferCheckTop Avg Time = %10ld\n", (ProfCount_RxBufferCheck == 0)?0:(ProfTime_RxBufferCheckTopHalf.QuadPart / ProfCount_RxBufferCheck)));
    KdPrint((__DRIVER_NAME " RxBufferCheckBot Avg Time = %10ld\n", (ProfCount_RxBufferCheck == 0)?0:(ProfTime_RxBufferCheckBotHalf.QuadPart / ProfCount_RxBufferCheck)));
    KdPrint((__DRIVER_NAME " Linearize Count = %10d, Avg Time = %10ld\n", ProfCount_Linearize, (ProfCount_Linearize == 0)?0:(ProfTime_Linearize.QuadPart / ProfCount_Linearize)));
    KdPrint((__DRIVER_NAME " SendPackets Count = %10d, Avg Time = %10ld\n", ProfCount_SendPackets, (ProfCount_SendPackets == 0)?0:(ProfTime_SendPackets.QuadPart / ProfCount_SendPackets)));
    KdPrint((__DRIVER_NAME " Packets per SendPackets = %10d\n", (ProfCount_SendPackets == 0)?0:(ProfCount_PacketsPerSendPackets / ProfCount_SendPackets)));
    KdPrint((__DRIVER_NAME " SendQueuedPackets Count = %10d, Avg Time = %10ld\n", ProfCount_SendQueuedPackets, (ProfCount_SendQueuedPackets == 0)?0:(ProfTime_SendQueuedPackets.QuadPart / ProfCount_SendQueuedPackets)));
    KdPrint((__DRIVER_NAME " TxBufferGC Count = %10d, Avg Time = %10ld\n", ProfCount_TxBufferGC, (ProfCount_TxBufferGC == 0)?0:(ProfTime_TxBufferGC.QuadPart / ProfCount_TxBufferGC)));
    KdPrint((__DRIVER_NAME " RxPackets Total = %10d, Csum Offload = %10d, Calls To Receive = %10d\n", ProfCount_RxPacketsTotal, ProfCount_RxPacketsCsumOffload, ProfCount_CallsToIndicateReceive));
    KdPrint((__DRIVER_NAME " TxPackets Total = %10d, Csum Offload = %10d, Large Offload = %10d\n", ProfCount_TxPacketsTotal, ProfCount_TxPacketsCsumOffload, ProfCount_TxPacketsLargeOffload));
  }
#endif
  // KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
}

static void
XenNet_TxBufferFree(struct xennet_info *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  USHORT i;
  grant_ref_t gref;

  ASSERT(!xi->connected);

  /* Free packets in tx queue */
  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  /* free sent-but-not-completed packets */
  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    packet = xi->tx_pkts[i];
    if (packet != NULL)
      NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    gref = xi->tx_grefs[i];
    if (gref != 0)
      xi->XenInterface.GntTbl_EndAccess(
        xi->XenInterface.InterfaceHeader.Context, gref, TRUE);
  }
}

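/*
 * TX initialisation: allocate the shared tx ring page and grant the
 * backend access to it, then pre-load the id freelist with every ring
 * index and the gref freelist with one grant reference per ring entry so
 * the send path can take them without further allocation.
 */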
BOOLEAN
XenNet_TxInit(xennet_info_t *xi)
{
  USHORT i;

  xi->tx_mdl = AllocatePage();
  xi->tx_pgs = MmGetMdlVirtualAddress(xi->tx_mdl);
  SHARED_RING_INIT(xi->tx_pgs);
  FRONT_RING_INIT(&xi->tx, xi->tx_pgs, PAGE_SIZE);
  xi->tx_ring_ref = xi->XenInterface.GntTbl_GrantAccess(
    xi->XenInterface.InterfaceHeader.Context, 0,
    *MmGetMdlPfnArray(xi->tx_mdl), FALSE, 0);
  xi->tx_id_free = 0;
  xi->tx_no_id_free = 0;
  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    xi->tx_pkts[i] = NULL;
    put_id_on_freelist(xi, i);
  }
  xi->tx_gref_free = 0;
  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    xi->tx_grefs[i] = 0;
    put_gref_on_freelist(xi, xi->XenInterface.GntTbl_GetRef(
      xi->XenInterface.InterfaceHeader.Context));
  }
  return TRUE;
}

BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi)
{
  ULONG i;

  XenNet_TxBufferFree(xi);

  /* free TX resources */
  if (xi->XenInterface.GntTbl_EndAccess(
    xi->XenInterface.InterfaceHeader.Context, xi->tx_ring_ref, 0))
  {
    xi->tx_ring_ref = GRANT_INVALID_REF;
    FreePages(xi->tx_mdl);
  }
  /* if EndAccess fails then tx/rx ring pages LEAKED -- it's not safe to reuse
     pages Dom0 still has access to */
  xi->tx_pgs = NULL;

  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    xi->XenInterface.GntTbl_PutRef(
      xi->XenInterface.InterfaceHeader.Context, xi->tx_gref_list[i]);
  }

  return TRUE;
}