win-pvdrivers

view xennet/xennet_tx.c @ 248:7c395bd04ec1

Some tweaking to get gso working properly again.
author James Harper <james.harper@bendigoit.com.au>
date Sat Apr 05 23:32:53 2008 +1100 (2008-04-05)
parents 565483912dc0
children 1e47fb7bce04
line source
1 /*
2 PV Net Driver for Windows Xen HVM Domains
3 Copyright (C) 2007 James Harper
4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
21 #include "xennet.h"
23 #define FREELIST_ID_ERROR 0xFFFF
25 #ifdef XEN_PROFILE
26 #define PC_INC(var) var++
27 #else
28 #define PC_INC(var)
29 #endif
31 static ULONG
32 free_requests(struct xennet_info *xi)
33 {
34 return xi->tx_id_free;
35 }
37 static USHORT
38 get_id_from_freelist(struct xennet_info *xi)
39 {
40 if (xi->tx_id_free - xi->tx_no_id_free == 0)
41 return FREELIST_ID_ERROR;
42 xi->tx_id_free--;
43 return xi->tx_id_list[xi->tx_id_free];
44 }
46 static USHORT
47 get_no_id_from_freelist(struct xennet_info *xi)
48 {
49 if (xi->tx_id_free - xi->tx_no_id_free == 0)
50 return FREELIST_ID_ERROR;
51 xi->tx_no_id_free--;
52 return 0;
53 }
55 static VOID
56 put_id_on_freelist(struct xennet_info *xi, USHORT id)
57 {
58 xi->tx_id_list[xi->tx_id_free] = id;
59 xi->tx_id_free++;
60 }
62 static VOID
63 put_no_id_on_freelist(struct xennet_info *xi)
64 {
65 xi->tx_no_id_free++;
66 }
68 static grant_ref_t
69 get_gref_from_freelist(struct xennet_info *xi)
70 {
71 if (xi->tx_gref_free == 0)
72 return 0;
73 xi->tx_gref_free--;
74 return xi->tx_gref_list[xi->tx_gref_free];
75 }
77 static VOID
78 put_gref_on_freelist(struct xennet_info *xi, grant_ref_t gref)
79 {
80 xi->tx_gref_list[xi->tx_gref_free] = gref;
81 xi->tx_gref_free++;
82 }
/* Byte-swap a 16-bit value.  The argument is fully parenthesized so
   that expressions like SWAP_USHORT(a + b) expand correctly; note the
   argument is still evaluated twice, so avoid side effects. */
#define SWAP_USHORT(x) ((USHORT)(((((x) & 0xFF) << 8)|(((x) >> 8) & 0xFF))))
/* One physical page's worth of packet data, produced when flattening a
   packet's MDL chain; each element becomes one tx ring request. */
typedef struct
{
  PFN_NUMBER pfn;  /* physical frame containing the data */
  USHORT offset;   /* byte offset of the data within the page */
  USHORT length;   /* bytes of data in this page (<= PAGE_SIZE - offset) */
} page_element_t;
/* Flatten the packet's MDL chain (pi->mdls[0..mdl_count-1]) into a
 * per-physical-page (pfn, offset, length) list, one entry per page
 * spanned.  On entry *num_elements is the capacity of elements[]; on
 * exit it is the number of entries actually filled in.
 */
static VOID
XenNet_BuildPageList(packet_info_t *pi, page_element_t *elements, PUSHORT num_elements)
{
  USHORT element_num = 0;
  UINT offset;
  UINT remaining;
  ULONG pages;
  USHORT page;
  PPFN_NUMBER pfns;
  ULONG i;

  for (i = 0; i < pi->mdl_count; i++)
  {
    offset = MmGetMdlByteOffset(pi->mdls[i]);
    remaining = MmGetMdlByteCount(pi->mdls[i]);
    pages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(pi->mdls[i]), remaining);
    pfns = MmGetMdlPfnArray(pi->mdls[i]);
    for (page = 0; page < pages; page++, element_num++)
    {
      ASSERT(element_num < *num_elements);
      elements[element_num].pfn = pfns[page];
      elements[element_num].offset = (USHORT)offset;
      elements[element_num].length = (USHORT)min(remaining, PAGE_SIZE - offset);
      //KdPrint((__DRIVER_NAME "     adding to page list size = %d, pfn = %08x, offset = %04x\n", elements[element_num].length, elements[element_num].pfn, elements[element_num].offset));
      /* only the first page of an MDL can start at a non-zero offset */
      offset = 0;
      remaining -= elements[element_num].length;
    }
    ASSERT(remaining == 0); /* the page loop must consume the whole MDL */
  }
  *num_elements = element_num;
}
/* Place a buffer on tx ring.
 * Allocates a tx id and a grant ref, grants the backend access to pfn,
 * and fills in the next free request slot.  The caller is responsible
 * for advancing req_prod_pvt.  Returns the filled request so the caller
 * can adjust its flags afterwards (e.g. clear NETTXF_more_data on the
 * final fragment).
 */
static struct netif_tx_request*
XenNet_PutOnTxRing(
  struct xennet_info *xi,
  PFN_NUMBER pfn,
  USHORT offset,
  USHORT len,
  uint16_t flags)
{
  struct netif_tx_request *tx;
  unsigned short id;

  id = get_id_from_freelist(xi);
  ASSERT(id != FREELIST_ID_ERROR); /* caller checked free_requests() first */
  ASSERT(xi->tx_pkts[id] == NULL);
  tx = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);

  tx->gref = get_gref_from_freelist(xi);
  ASSERT(tx->gref != 0);
  ASSERT(xi->tx_grefs[id] == 0);
  xi->tx_grefs[id] = tx->gref; /* remembered so TxBufferGC can recycle it */

  /* grant the backend domain access to the page being transmitted */
  xi->XenInterface.GntTbl_GrantAccess(
    xi->XenInterface.InterfaceHeader.Context, 0,
    pfn, FALSE, tx->gref);
  tx->id = id;
  tx->offset = (uint16_t)offset;
  tx->size = (uint16_t)len;
  tx->flags = flags;
  PC_INC(ProfCount_TxPacketsTotal);

  return tx;
}
160 /* Called at DISPATCH_LEVEL with tx_lock held */
161 /*
162 * Send one NDIS_PACKET. This may involve multiple entries on TX ring.
163 */
164 static BOOLEAN
165 XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet)
166 {
167 struct netif_tx_request *tx = NULL;
168 struct netif_extra_info *ei;
169 PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
170 UINT total_packet_length;
171 ULONG mss; // 0 if not using large send
172 PMDL buffer;
173 uint16_t flags = NETTXF_more_data;
174 page_element_t elements[NET_TX_RING_SIZE];
175 USHORT num_elements;
176 USHORT element_num;
177 packet_info_t pi;
178 PUCHAR address = NULL;
179 PMDL merged_buffer = NULL;
180 ULONG length = 0;
182 #if defined(XEN_PROFILE)
183 LARGE_INTEGER tsc, dummy;
185 tsc = KeQueryPerformanceCounter(&dummy);
186 #endif
188 RtlZeroMemory(&pi, sizeof(pi));
190 csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
191 packet, TcpIpChecksumPacketInfo);
192 mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));
194 NdisQueryPacket(packet, NULL, NULL, &buffer, &total_packet_length);
196 pi.mdls[0] = buffer;
197 pi.mdl_count = 1;
198 // only if csum offload
199 if ((csum_info->Transmit.NdisPacketTcpChecksum
200 || csum_info->Transmit.NdisPacketUdpChecksum
201 || mss > 0)
202 && XenNet_ParsePacketHeader(&pi) == PARSE_TOO_SMALL)
203 {
204 pi.mdls[0] = merged_buffer = AllocatePage();
205 address = MmGetMdlVirtualAddress(pi.mdls[0]);
206 memcpy(address, MmGetSystemAddressForMdlSafe(buffer, NormalPagePriority), MmGetMdlByteCount(buffer));
207 length = MmGetMdlByteCount(buffer);
208 NdisAdjustBufferLength(pi.mdls[0], length); /* do this here so that ParsePacketHeader works */
209 while (buffer->Next != NULL && XenNet_ParsePacketHeader(&pi) == PARSE_TOO_SMALL)
210 {
211 buffer = buffer->Next;
212 ASSERT(length + MmGetMdlByteCount(buffer) <= PAGE_SIZE); // I think this could happen
213 memcpy(&address[length], MmGetSystemAddressForMdlSafe(buffer, NormalPagePriority), MmGetMdlByteCount(buffer));
214 length += MmGetMdlByteCount(buffer);
215 NdisAdjustBufferLength(pi.mdls[0], length); /* do this here so that ParsePacketHeader works */
216 }
217 }
218 NdisGetNextBuffer(buffer, &buffer);
219 while (buffer != NULL)
220 {
221 pi.mdls[pi.mdl_count++] = buffer;
222 NdisGetNextBuffer(buffer, &buffer);
223 }
225 num_elements = NET_TX_RING_SIZE;
226 XenNet_BuildPageList(&pi, elements, &num_elements);
228 if (num_elements + !!mss > (int)free_requests(xi))
229 return FALSE;
231 if (csum_info->Transmit.NdisPacketTcpChecksum
232 || csum_info->Transmit.NdisPacketUdpChecksum)
233 {
234 flags |= NETTXF_csum_blank | NETTXF_data_validated;
235 PC_INC(ProfCount_TxPacketsCsumOffload);
236 }
238 if (mss > 0)
239 {
240 flags |= NETTXF_extra_info;
241 XenNet_SumIpHeader(MmGetSystemAddressForMdlSafe(pi.mdls[0], NormalPagePriority), pi.ip4_header_length);
242 PC_INC(ProfCount_TxPacketsLargeOffload);
243 }
245 /*
246 * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
247 * (C) rest of requests on the ring. Only (A) has csum flags.
248 */
249 /* (A) */
250 tx = XenNet_PutOnTxRing(xi, elements[0].pfn, elements[0].offset, (USHORT)total_packet_length, flags);
251 xi->tx.req_prod_pvt++;
253 /* (B) */
254 if (mss > 0)
255 {
256 get_no_id_from_freelist(xi);
257 ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
258 ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
259 ei->flags = 0;
260 ei->u.gso.size = (USHORT) mss;
261 ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
262 ei->u.gso.pad = 0;
263 ei->u.gso.features = 0;
265 xi->tx.req_prod_pvt++;
266 }
268 /* (C) */
269 for (element_num = 1; element_num < num_elements; element_num++)
270 {
271 //if (csum_info->Transmit.NdisPacketTcpChecksum || csum_info->Transmit.NdisPacketUdpChecksum)
272 //KdPrint((__DRIVER_NAME " - size = %d, pfn = %08x, offset = %04x\n", elements[element_num].length, elements[element_num].pfn, elements[element_num].offset));
273 //KdPrint((__DRIVER_NAME " i = %d\n", i));
274 tx = XenNet_PutOnTxRing(xi, elements[element_num].pfn,
275 elements[element_num].offset, elements[element_num].length,
276 NETTXF_more_data);
277 xi->tx.req_prod_pvt++;
278 }
280 /* only set the packet on the last buffer, clear more_data */
281 ASSERT(tx);
282 xi->tx_pkts[tx->id] = packet;
283 xi->tx_mdls[tx->id] = merged_buffer;
284 tx->flags &= ~NETTXF_more_data;
286 return TRUE;
287 }
289 /* Called at DISPATCH_LEVEL with tx_lock held */
291 static VOID
292 XenNet_SendQueuedPackets(struct xennet_info *xi)
293 {
294 PLIST_ENTRY entry;
295 PNDIS_PACKET packet;
296 int notify;
297 #if defined(XEN_PROFILE)
298 LARGE_INTEGER tsc, dummy;
299 #endif
301 int cycles = 0;
302 BOOLEAN success;
304 #if defined(XEN_PROFILE)
305 tsc = KeQueryPerformanceCounter(&dummy);
306 #endif
308 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
309 /* if empty, the above returns head*, not NULL */
310 while (entry != &xi->tx_waiting_pkt_list)
311 {
312 ASSERT(cycles++ < 65536);
313 //KdPrint((__DRIVER_NAME " Packet ready to send\n"));
314 packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
315 success = XenNet_HWSendPacket(xi, packet);
316 if (!success)
317 break;
318 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
319 }
321 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx, notify);
322 if (notify)
323 {
324 xi->XenInterface.EvtChn_Notify(xi->XenInterface.InterfaceHeader.Context,
325 xi->event_channel);
326 }
328 #if defined(XEN_PROFILE)
329 ProfTime_SendQueuedPackets.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
330 ProfCount_SendQueuedPackets++;
331 #endif
332 }
// Called at DISPATCH_LEVEL
/* Reap completed tx responses from the ring: recycle ids and grant
 * refs, free any header-coalesce pages, collect completed packets,
 * then send anything still queued and finally complete the collected
 * packets back to NDIS after releasing the lock.
 */
NDIS_STATUS
XenNet_TxBufferGC(struct xennet_info *xi)
{
  RING_IDX cons, prod;
  unsigned short id;
  PNDIS_PACKET packets[NET_TX_RING_SIZE];
  ULONG packet_count = 0;
  int moretodo;
  ULONG i;
  UINT total_packet_length;
  int cycles = 0;
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
#endif

  ASSERT(xi->connected);
  ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

//  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));

#if defined(XEN_PROFILE)
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  do {
    ASSERT(cycles++ < 65536);
    prod = xi->tx.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rp'. */

    for (cons = xi->tx.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;

      ASSERT(cycles++ < 65536);

      txrsp = RING_GET_RESPONSE(&xi->tx, cons);
      if (txrsp->status == NETIF_RSP_NULL)
      {
//        KdPrint((__DRIVER_NAME "     NETIF_RSP_NULL\n"));
        put_no_id_on_freelist(xi);
        continue; // This would be the response to an extra_info packet
      }

      id = txrsp->id;
      packets[packet_count] = xi->tx_pkts[id];
      /* only the final fragment of a packet had tx_pkts[id] set */
      if (packets[packet_count])
      {
        NdisQueryPacket(packets[packet_count], NULL, NULL, NULL, &total_packet_length);
        /* LSO packets must report back the number of bytes sent */
        if (NDIS_PER_PACKET_INFO_FROM_PACKET(packets[packet_count], TcpLargeSendPacketInfo) != 0)
        {
          NDIS_PER_PACKET_INFO_FROM_PACKET(packets[packet_count], TcpLargeSendPacketInfo) = UlongToPtr(total_packet_length);
          //KdPrint((__DRIVER_NAME "     Large Send Response = %d\n", NDIS_PER_PACKET_INFO_FROM_PACKET(packets[packet_count], TcpLargeSendPacketInfo)));
        }
        xi->tx_pkts[id] = NULL;
        packet_count++;
        xi->stat_tx_ok++;
      }
      /* free the header-coalesce page, if this id used one */
      if (xi->tx_mdls[id])
      {
        FreePages(xi->tx_mdls[id]);
        xi->tx_mdls[id] = NULL;
      }
      put_gref_on_freelist(xi, xi->tx_grefs[id]);
      xi->tx_grefs[id] = 0;
      put_id_on_freelist(xi, id);
    }

    xi->tx.rsp_cons = prod;

    RING_FINAL_CHECK_FOR_RESPONSES(&xi->tx, moretodo);
  } while (moretodo);

  /* if queued packets, send them now */
  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  for (i = 0; i < packet_count; i++)
  {
    /* A miniport driver must release any spin lock that it is holding before
       calling NdisMSendComplete. */
    NdisMSendComplete(xi->adapter_handle, packets[i], NDIS_STATUS_SUCCESS);
    xi->tx_outstanding--;
  }

//  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));

#if defined(XEN_PROFILE)
  ProfTime_TxBufferGC.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_TxBufferGC++;
#endif

  return NDIS_STATUS_SUCCESS;
}
/* NDIS SendPackets handler: queue each packet on tx_waiting_pkt_list
 * (using the packet's MiniportReservedEx area as the list entry) and
 * then attempt to drain the queue onto the tx ring.
 */
VOID
XenNet_SendPackets(
  IN NDIS_HANDLE MiniportAdapterContext,
  IN PPNDIS_PACKET PacketArray,
  IN UINT NumberOfPackets
  )
{
  struct xennet_info *xi = MiniportAdapterContext;
  PNDIS_PACKET packet;
  UINT i;
  PLIST_ENTRY entry;
  KIRQL OldIrql;
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
  KIRQL OldIrql2;
#endif

#if defined(XEN_PROFILE)
  KeRaiseIrql(DISPATCH_LEVEL, &OldIrql2);
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);

//  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
  for (i = 0; i < NumberOfPackets; i++)
  {
    packet = PacketArray[i];
    ASSERT(packet);
    /* first ULONG of MiniportReservedEx is cleared; the LIST_ENTRY
       lives after the first PVOID-sized slot */
    *(ULONG *)&packet->MiniportReservedEx = 0;
    entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
    InsertTailList(&xi->tx_waiting_pkt_list, entry);
    xi->tx_outstanding++;
#if defined(XEN_PROFILE)
    ProfCount_PacketsPerSendPackets++;
#endif
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

#if defined(XEN_PROFILE)
  ProfTime_SendPackets.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_SendPackets++;
  KeLowerIrql(OldIrql2);
#endif

#if defined(XEN_PROFILE)
  /* periodic dump of the profiling counters */
  if ((ProfCount_SendPackets & 1023) == 0)
  {
    KdPrint((__DRIVER_NAME "     ***\n"));
    KdPrint((__DRIVER_NAME "     RxBufferAlloc Count = %10d, Avg Time = %10ld\n", ProfCount_RxBufferAlloc, (ProfCount_RxBufferAlloc == 0)?0:(ProfTime_RxBufferAlloc.QuadPart / ProfCount_RxBufferAlloc)));
    KdPrint((__DRIVER_NAME "     ReturnPacket Count = %10d, Avg Time = %10ld\n", ProfCount_ReturnPacket, (ProfCount_ReturnPacket == 0)?0:(ProfTime_ReturnPacket.QuadPart / ProfCount_ReturnPacket)));
    KdPrint((__DRIVER_NAME "     RxBufferCheck Count = %10d, Avg Time = %10ld\n", ProfCount_RxBufferCheck, (ProfCount_RxBufferCheck == 0)?0:(ProfTime_RxBufferCheck.QuadPart / ProfCount_RxBufferCheck)));
    KdPrint((__DRIVER_NAME "     RxBufferCheckTop Avg Time = %10ld\n", (ProfCount_RxBufferCheck == 0)?0:(ProfTime_RxBufferCheckTopHalf.QuadPart / ProfCount_RxBufferCheck)));
    KdPrint((__DRIVER_NAME "     RxBufferCheckBot Avg Time = %10ld\n", (ProfCount_RxBufferCheck == 0)?0:(ProfTime_RxBufferCheckBotHalf.QuadPart / ProfCount_RxBufferCheck)));
    KdPrint((__DRIVER_NAME "     Linearize Count = %10d, Avg Time = %10ld\n", ProfCount_Linearize, (ProfCount_Linearize == 0)?0:(ProfTime_Linearize.QuadPart / ProfCount_Linearize)));
    KdPrint((__DRIVER_NAME "     SendPackets Count = %10d, Avg Time = %10ld\n", ProfCount_SendPackets, (ProfCount_SendPackets == 0)?0:(ProfTime_SendPackets.QuadPart / ProfCount_SendPackets)));
    KdPrint((__DRIVER_NAME "     Packets per SendPackets = %10d\n", (ProfCount_SendPackets == 0)?0:(ProfCount_PacketsPerSendPackets / ProfCount_SendPackets)));
    KdPrint((__DRIVER_NAME "     SendQueuedPackets Count = %10d, Avg Time = %10ld\n", ProfCount_SendQueuedPackets, (ProfCount_SendQueuedPackets == 0)?0:(ProfTime_SendQueuedPackets.QuadPart / ProfCount_SendQueuedPackets)));
    KdPrint((__DRIVER_NAME "     TxBufferGC Count = %10d, Avg Time = %10ld\n", ProfCount_TxBufferGC, (ProfCount_TxBufferGC == 0)?0:(ProfTime_TxBufferGC.QuadPart / ProfCount_TxBufferGC)));
    KdPrint((__DRIVER_NAME "     RxPackets Total = %10d, Csum Offload = %10d, Calls To Receive = %10d\n", ProfCount_RxPacketsTotal, ProfCount_RxPacketsCsumOffload, ProfCount_CallsToIndicateReceive));
    KdPrint((__DRIVER_NAME "     TxPackets Total = %10d, Csum Offload = %10d, Large Offload = %10d\n", ProfCount_TxPacketsTotal, ProfCount_TxPacketsCsumOffload, ProfCount_TxPacketsLargeOffload));
  }
#endif
//  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
}
/* Fail all queued and in-flight tx packets back to NDIS and end grant
 * access for any outstanding grant refs.  Only called once the device
 * is disconnected.
 */
static void
XenNet_TxBufferFree(struct xennet_info *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  USHORT i;
  grant_ref_t gref;

  ASSERT(!xi->connected);

  /* Free packets in tx queue */
  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  /* free sent-but-not-completed packets */
  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    packet = xi->tx_pkts[i];
    if (packet != NULL)
      NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    gref = xi->tx_grefs[i];
    if (gref != 0)
      /* TRUE: the backend may still hold a read-only mapping */
      xi->XenInterface.GntTbl_EndAccess(
        xi->XenInterface.InterfaceHeader.Context, gref, TRUE);
  }
}
533 BOOLEAN
534 XenNet_TxInit(xennet_info_t *xi)
535 {
536 USHORT i;
538 xi->tx_mdl = AllocatePage();
539 xi->tx_pgs = MmGetMdlVirtualAddress(xi->tx_mdl);
540 SHARED_RING_INIT(xi->tx_pgs);
541 FRONT_RING_INIT(&xi->tx, xi->tx_pgs, PAGE_SIZE);
542 xi->tx_ring_ref = xi->XenInterface.GntTbl_GrantAccess(
543 xi->XenInterface.InterfaceHeader.Context, 0,
544 *MmGetMdlPfnArray(xi->tx_mdl), FALSE, 0);
545 xi->tx_id_free = 0;
546 xi->tx_no_id_free = 0;
547 for (i = 0; i < NET_TX_RING_SIZE; i++)
548 {
549 xi->tx_pkts[i] = NULL;
550 put_id_on_freelist(xi, i);
551 }
552 xi->tx_gref_free = 0;
553 for (i = 0; i < NET_TX_RING_SIZE; i++)
554 {
555 xi->tx_grefs[i] = 0;
556 put_gref_on_freelist(xi, xi->XenInterface.GntTbl_GetRef(
557 xi->XenInterface.InterfaceHeader.Context));
558 }
559 return TRUE;
560 }
/* Tear down the tx path: fail outstanding packets, end access to the
 * ring page (freeing it only if the backend has released it), and
 * return the pre-allocated grant refs to the grant table.
 */
BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi)
{
  ULONG i;

  XenNet_TxBufferFree(xi);

  /* free TX resources */
  if (xi->XenInterface.GntTbl_EndAccess(
    xi->XenInterface.InterfaceHeader.Context, xi->tx_ring_ref, 0))
  {
    xi->tx_ring_ref = GRANT_INVALID_REF;
    FreePages(xi->tx_mdl);
  }
  /* if EndAccess fails then tx/rx ring pages LEAKED -- it's not safe to reuse
     pages Dom0 still has access to */
  xi->tx_pgs = NULL;

  /* I think that NDIS takes care of this for us... */
  ASSERT(xi->tx_outstanding == 0);

  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    xi->XenInterface.GntTbl_PutRef(
      xi->XenInterface.InterfaceHeader.Context, xi->tx_gref_list[i]);
  }

  return TRUE;
}