win-pvdrivers

view xennet/xennet_tx.c @ 537:2a74ac2f43bb

more big updates
dma now working under xp
author James Harper <james.harper@bendigoit.com.au>
date Wed Feb 18 22:18:23 2009 +1100 (2009-02-18)
parents 1d39de3ab8d6
children b0b8be2d30c0
line source
1 /*
2 PV Net Driver for Windows Xen HVM Domains
3 Copyright (C) 2007 James Harper
4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
21 #include "xennet.h"
23 static USHORT
24 get_id_from_freelist(struct xennet_info *xi)
25 {
26 ASSERT(xi->tx_id_free);
27 xi->tx_id_free--;
29 return xi->tx_id_list[xi->tx_id_free];
30 }
32 static VOID
33 put_id_on_freelist(struct xennet_info *xi, USHORT id)
34 {
35 xi->tx_id_list[xi->tx_id_free] = id;
36 xi->tx_id_free++;
37 }
39 static __inline shared_buffer_t *
40 get_hb_from_freelist(struct xennet_info *xi)
41 {
42 shared_buffer_t *hb;
44 if (xi->tx_hb_free == 0)
45 {
46 return NULL;
47 }
48 xi->tx_hb_free--;
50 hb = &xi->tx_hbs[xi->tx_hb_list[xi->tx_hb_free]];
51 return hb;
52 }
54 static __inline VOID
55 put_hb_on_freelist(struct xennet_info *xi, shared_buffer_t *hb)
56 {
57 xi->tx_hb_list[xi->tx_hb_free] = hb->id;
58 xi->tx_hb_free++;
59 }
61 #define SWAP_USHORT(x) (USHORT)((((x & 0xFF) << 8)|((x >> 8) & 0xFF)))
/* Called at DISPATCH_LEVEL with tx_lock held */
/*
 * Send one NDIS_PACKET. This may involve multiple entries on TX ring:
 * (A) the first netif_tx_request, which carries the checksum flags and
 *     ends up with 'size' equal to the total packet length,
 * (B) an optional netif_extra_info slot when Xen GSO is used, then
 * (C) one further request per remaining scatter/gather element.
 * Returns TRUE when the packet has been consumed (queued, or dropped on a
 * bad offload request); FALSE when the ring or header-buffer pool is too
 * full - the caller should re-queue the packet and retry later.
 */
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet)
{
  struct netif_tx_request *tx0 = NULL;  /* first request - gets flags + running total size */
  struct netif_tx_request *txN = NULL;  /* most recently added request */
  struct netif_extra_info *ei = NULL;
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  //UINT total_packet_length;
  ULONG mss = 0;                        /* requested LSO segment size; 0 = no LSO */
  uint16_t flags = NETTXF_more_data;    /* cleared on the final request below */
  packet_info_t pi;
  BOOLEAN ndis_lso = FALSE;             /* NDIS asked for large-send offload */
  BOOLEAN xen_gso = FALSE;              /* we will emit a Xen GSO extra_info */
  //ULONG remaining;
  PSCATTER_GATHER_LIST sg;
  ULONG sg_element = 0;                 /* next SG element to put on the ring */
  ULONG sg_offset = 0;                  /* byte offset into that element (after header coalesce) */
  ULONG parse_result;
  shared_buffer_t *header_buf = NULL;   /* coalesced-header buffer, if one is used */

  //FUNCTION_ENTER();

  XenNet_ClearPacketInfo(&pi);
  NdisQueryPacket(packet, NULL, (PUINT)&pi.mdl_count, &pi.first_buffer, (PUINT)&pi.total_length);
  //KdPrint((__DRIVER_NAME " A - packet = %p, mdl_count = %d, total_length = %d\n", packet, pi.mdl_count, pi.total_length));

  parse_result = XenNet_ParsePacketHeader(&pi);
  //KdPrint((__DRIVER_NAME " B\n"));

  /* Translate the NDIS checksum-offload request into NETTXF_* flags.
     NOTE(review): requests for offloads disabled in our settings are only
     logged, not failed - the commented-out failure path was backed out. */
  if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP)
  {
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    if (csum_info->Transmit.NdisPacketChecksumV4)
    {
      if (csum_info->Transmit.NdisPacketIpChecksum && !xi->setting_csum.V4Transmit.IpChecksum)
      {
        KdPrint((__DRIVER_NAME " IpChecksum not enabled\n"));
        //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
        //return TRUE;
      }
      if (csum_info->Transmit.NdisPacketTcpChecksum)
      {
        if (xi->setting_csum.V4Transmit.TcpChecksum)
        {
          flags |= NETTXF_csum_blank | NETTXF_data_validated;
        }
        else
        {
          KdPrint((__DRIVER_NAME " TcpChecksum not enabled\n"));
          //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
          //return TRUE;
        }
      }
      else if (csum_info->Transmit.NdisPacketUdpChecksum)
      {
        if (xi->setting_csum.V4Transmit.UdpChecksum)
        {
          flags |= NETTXF_csum_blank | NETTXF_data_validated;
        }
        else
        {
          KdPrint((__DRIVER_NAME " UdpChecksum not enabled\n"));
          //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
          //return TRUE;
        }
      }
    }
    else if (csum_info->Transmit.NdisPacketChecksumV6)
    {
      KdPrint((__DRIVER_NAME " NdisPacketChecksumV6 not supported\n"));
      //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
      //return TRUE;
    }
  }

  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));

  if (mss)
  {
    if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) != NDIS_PROTOCOL_ID_TCP_IP)
    {
      KdPrint((__DRIVER_NAME " mss specified when packet is not NDIS_PROTOCOL_ID_TCP_IP\n"));
    }
    ndis_lso = TRUE;
    if (mss > xi->setting_max_offload)
    {
      /* drop the packet (TRUE = consumed) rather than stall the queue on a bad request */
      KdPrint((__DRIVER_NAME " Requested MSS (%d) larger than allowed MSS (%d)\n", mss, xi->setting_max_offload));
      //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
      //FUNCTION_EXIT();
      return TRUE;
    }
  }

  /* worst case: one ring slot per MDL plus one extra_info slot for LSO */
  if (pi.mdl_count + !!ndis_lso > xi->tx_ring_free)
  {
    KdPrint((__DRIVER_NAME " Full on send - required = %d, available = %d\n", pi.mdl_count + !!ndis_lso, xi->tx_ring_free));
    //FUNCTION_EXIT();
    return FALSE;
  }

  sg = (PSCATTER_GATHER_LIST)NDIS_PER_PACKET_INFO_FROM_PACKET(packet, ScatterGatherListPacketInfo);
  ASSERT(sg != NULL);

  /* grab a header buffer when the protocol headers must be contiguous (LSO)
     or when they straddle the first SG element */
  if (ndis_lso || (pi.header_length && pi.header_length > sg->Elements[sg_element].Length && pi.header == pi.header_data))
  {
    // why is a hb being used for icmp???
    header_buf = get_hb_from_freelist(xi);
    if (!header_buf)
    {
      KdPrint((__DRIVER_NAME " Full on send - no free hb's\n"));
      return FALSE;
    }
  }

  if (ndis_lso)
  {
    if (parse_result == PARSE_OK)
    {
      flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used*/
      if (pi.tcp_length >= mss)
      {
        flags |= NETTXF_extra_info;
        xen_gso = TRUE;
      }
      else
      {
        KdPrint((__DRIVER_NAME " large send specified when tcp_length < mss\n"));
      }
    }
    else
    {
      KdPrint((__DRIVER_NAME " could not parse packet - no large send offload done\n"));
    }
  }

  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */

  //KdPrint((__DRIVER_NAME " C\n"));
  /* (A) */
  // if we coalesced the header then we want to put that on first, otherwise we put on the first sg element
  tx0 = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
  tx0->id = 0xFFFF; /* sentinel - only the final request of the chain gets a real shadow id */
  if (header_buf)
  {
    ULONG remaining = pi.header_length;
    ASSERT(pi.header_length < TX_HEADER_BUFFER_SIZE);
    //KdPrint((__DRIVER_NAME " D - header_length = %d\n", pi.header_length));
    memcpy(header_buf->virtual, pi.header, pi.header_length);
    /* even though we haven't reported that we are capable of it, LSO demands that we calculate the IP Header checksum */
    XenNet_SumIpHeader(header_buf->virtual, pi.ip4_header_length);
    tx0->gref = (grant_ref_t)(header_buf->logical.QuadPart >> PAGE_SHIFT);
    tx0->offset = (USHORT)header_buf->logical.LowPart & (PAGE_SIZE - 1);
    tx0->size = (USHORT)pi.header_length;
    ASSERT(tx0->offset + tx0->size <= PAGE_SIZE);
    ASSERT(tx0->size);
    /* TODO: if the next buffer contains only a small amount of data then put it on too */
    /* skip over the SG bytes we just copied into the header buffer; a
       partially-consumed element is resumed at sg_offset by loop (C) below */
    while (remaining)
    {
      //KdPrint((__DRIVER_NAME " D - remaining = %d\n", remaining));
      //KdPrint((__DRIVER_NAME " Da - sg_element = %d, sg->Elements[sg_element].Length = %d\n", sg_element, sg->Elements[sg_element].Length));
      if (sg->Elements[sg_element].Length <= remaining)
      {
        remaining -= sg->Elements[sg_element].Length;
        sg_element++;
      }
      else
      {
        sg_offset = remaining;
        remaining = 0;
      }
    }
  }
  else
  {
    //KdPrint((__DRIVER_NAME " E\n"));
    //KdPrint((__DRIVER_NAME " Eg - sg_element = %d, sg_offset = %d\n", sg_element, sg_offset));
    //KdPrint((__DRIVER_NAME " Eh - address = %p, length = %d\n",
    // sg->Elements[sg_element].Address.LowPart, sg->Elements[sg_element].Length));
    tx0->gref = (grant_ref_t)(sg->Elements[sg_element].Address.QuadPart >> PAGE_SHIFT);
    tx0->offset = (USHORT)sg->Elements[sg_element].Address.LowPart & (PAGE_SIZE - 1);
    tx0->size = (USHORT)sg->Elements[sg_element].Length;
    ASSERT(tx0->size);
    sg_element++;
  }
  tx0->flags = flags;
  txN = tx0;
  xi->tx.req_prod_pvt++;

  /* (B) */
  if (xen_gso)
  {
    ASSERT(flags & NETTXF_extra_info);
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT)mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;
    xi->tx.req_prod_pvt++;
  }

  //KdPrint((__DRIVER_NAME " F\n"));
  /* (C) remaining SG elements; tx0->size accumulates up to the total packet
     length, as the netif protocol requires of the first request */
  while (sg_element < sg->NumberOfElements)
  {
    //KdPrint((__DRIVER_NAME " G - sg_element = %d, sg_offset = %d\n", sg_element, sg_offset));
    //KdPrint((__DRIVER_NAME " H - address = %p, length = %d\n",
    // sg->Elements[sg_element].Address.LowPart + sg_offset, sg->Elements[sg_element].Length - sg_offset));
    txN = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
    txN->id = 0xFFFF;
    txN->gref = (grant_ref_t)(sg->Elements[sg_element].Address.QuadPart >> PAGE_SHIFT);
    txN->offset = (USHORT)(sg->Elements[sg_element].Address.LowPart + sg_offset) & (PAGE_SIZE - 1);
    txN->size = (USHORT)(sg->Elements[sg_element].Length - sg_offset);
    ASSERT(txN->offset + txN->size <= PAGE_SIZE);
    ASSERT(txN->size);
    tx0->size = tx0->size + txN->size;
    txN->flags = NETTXF_more_data;
    sg_element++;
    sg_offset = 0;
    xi->tx.req_prod_pvt++;
  }
  /* the last request terminates the chain and owns the shadow entry */
  txN->flags &= ~NETTXF_more_data;
  txN->id = get_id_from_freelist(xi);
  //KdPrint((__DRIVER_NAME " send - id = %d\n", tx0->id));
  xi->tx_shadows[txN->id].packet = packet;
  xi->tx_shadows[txN->id].hb = header_buf;

  if (ndis_lso)
  {
    /* report back to NDIS how many payload bytes this send covered */
    //KdPrint((__DRIVER_NAME " TcpLargeSendPacketInfo = %d\n", pi.tcp_length));
    NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(pi.tcp_length);
  }

  xi->stat_tx_ok++;

  //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
  //FUNCTION_EXIT();
  xi->tx_outstanding++;
  return TRUE;
}
313 /* Called at DISPATCH_LEVEL with tx_lock held */
315 static VOID
316 XenNet_SendQueuedPackets(struct xennet_info *xi)
317 {
318 PLIST_ENTRY entry;
319 PNDIS_PACKET packet;
320 int notify;
322 //FUNCTION_ENTER();
324 if (xi->device_state->suspend_resume_state_pdo != SR_STATE_RUNNING)
325 return;
327 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
328 /* if empty, the above returns head*, not NULL */
329 while (entry != &xi->tx_waiting_pkt_list)
330 {
331 packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
332 //KdPrint((__DRIVER_NAME " Packet ready to send\n"));
333 if (!XenNet_HWSendPacket(xi, packet))
334 {
335 InsertHeadList(&xi->tx_waiting_pkt_list, entry);
336 break;
337 }
338 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
339 }
341 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx, notify);
342 if (notify)
343 {
344 xi->vectors.EvtChn_Notify(xi->vectors.context, xi->event_channel);
345 }
346 //FUNCTION_EXIT();
347 }
//ULONG packets_outstanding = 0;
// Called at DISPATCH_LEVEL
/*
 * DPC routine: harvest tx responses from the ring, recycle shadow ids and
 * header buffers, then complete the finished packets back to NDIS outside
 * the tx_lock.  Also retries queued packets and drives the suspend
 * handshake once the ring is fully drained.
 */
VOID
XenNet_TxBufferGC(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2)
{
  struct xennet_info *xi = context;
  RING_IDX cons, prod;
  PNDIS_PACKET head = NULL, tail = NULL; /* local list of packets to complete, linked via MiniportReservedEx[0] */
  PNDIS_PACKET packet;

  UNREFERENCED_PARAMETER(dpc);
  UNREFERENCED_PARAMETER(arg1);
  UNREFERENCED_PARAMETER(arg2);

  //FUNCTION_ENTER();

  ASSERT(xi->connected);
  ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  do {
    prod = xi->tx.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;
      txrsp = RING_GET_RESPONSE(&xi->tx, cons);
      /* 0xFFFF marks a non-final request of a chain - nothing to free for it */
      if (txrsp->status == NETIF_RSP_NULL || txrsp->id == 0xFFFF)
        continue;

      if (xi->tx_shadows[txrsp->id].hb)
      {
        put_hb_on_freelist(xi, xi->tx_shadows[txrsp->id].hb);
        xi->tx_shadows[txrsp->id].hb = NULL;
      }

      if (xi->tx_shadows[txrsp->id].packet)
      {
        /* append to the local completion list */
        packet = xi->tx_shadows[txrsp->id].packet;
        *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
        if (head)
          *(PNDIS_PACKET *)&tail->MiniportReservedEx[0] = packet;
        else
          head = packet;
        tail = packet;
        xi->tx_shadows[txrsp->id].packet = NULL;
      }
      put_id_on_freelist(xi, txrsp->id);
    }

    xi->tx.rsp_cons = prod;
    /* only request another event after a further quarter-ring of responses */
    xi->tx.sring->rsp_event = prod + (NET_TX_RING_SIZE >> 2);
    KeMemoryBarrier();
  } while (prod != xi->tx.sring->rsp_prod); /* re-check for responses that raced in */

  /* if queued packets, send them now */
  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  /* complete packets to NDIS without holding the tx_lock */
  while (head)
  {
    packet = (PNDIS_PACKET)head;
    head = *(PNDIS_PACKET *)&packet->MiniportReservedEx[0];
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
    xi->tx_outstanding--;
    if (!xi->tx_outstanding && xi->tx_shutting_down)
      KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
  }

  /* suspend handshake: signal the pdo only once every tx id is back on the freelist */
  if (xi->device_state->suspend_resume_state_pdo == SR_STATE_SUSPENDING
    && xi->device_state->suspend_resume_state_fdo != SR_STATE_SUSPENDING
    && xi->tx_id_free == NET_TX_RING_SIZE)
  {
    KdPrint((__DRIVER_NAME " Setting SR_STATE_SUSPENDING\n"));
    xi->device_state->suspend_resume_state_fdo = SR_STATE_SUSPENDING;
    KdPrint((__DRIVER_NAME " Notifying event channel %d\n", xi->device_state->pdo_event_channel));
    xi->vectors.EvtChn_Notify(xi->vectors.context, xi->device_state->pdo_event_channel);
  }

  //FUNCTION_EXIT();
}
434 // called at <= DISPATCH_LEVEL
435 VOID DDKAPI
436 XenNet_SendPackets(
437 IN NDIS_HANDLE MiniportAdapterContext,
438 IN PPNDIS_PACKET PacketArray,
439 IN UINT NumberOfPackets
440 )
441 {
442 struct xennet_info *xi = MiniportAdapterContext;
443 PNDIS_PACKET packet;
444 UINT i;
445 PLIST_ENTRY entry;
446 KIRQL OldIrql;
448 //FUNCTION_ENTER();
450 if (xi->inactive)
451 {
452 for (i = 0; i < NumberOfPackets; i++)
453 {
454 NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
455 }
456 return;
457 }
459 KeAcquireSpinLock(&xi->tx_lock, &OldIrql);
461 // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ " (packets = %d, free_requests = %d)\n", NumberOfPackets, free_requests(xi)));
462 for (i = 0; i < NumberOfPackets; i++)
463 {
464 packet = PacketArray[i];
465 //packets_outstanding++;
466 //KdPrint(("+packet = %p\n", packet));
467 ASSERT(packet);
468 *(ULONG *)&packet->MiniportReservedEx = 0;
469 entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
470 InsertTailList(&xi->tx_waiting_pkt_list, entry);
471 }
473 XenNet_SendQueuedPackets(xi);
475 KeReleaseSpinLock(&xi->tx_lock, OldIrql);
477 //FUNCTION_EXIT();
478 }
480 VOID
481 XenNet_TxResumeStart(xennet_info_t *xi)
482 {
483 UNREFERENCED_PARAMETER(xi);
485 FUNCTION_ENTER();
486 /* nothing to do here - all packets were already sent */
487 FUNCTION_EXIT();
488 }
490 VOID
491 XenNet_TxResumeEnd(xennet_info_t *xi)
492 {
493 KIRQL old_irql;
495 FUNCTION_ENTER();
497 KeAcquireSpinLock(&xi->tx_lock, &old_irql);
498 XenNet_SendQueuedPackets(xi);
499 KeReleaseSpinLock(&xi->tx_lock, old_irql);
501 FUNCTION_EXIT();
502 }
504 BOOLEAN
505 XenNet_TxInit(xennet_info_t *xi)
506 {
507 USHORT i, j;
509 KeInitializeSpinLock(&xi->tx_lock);
510 KeInitializeDpc(&xi->tx_dpc, XenNet_TxBufferGC, xi);
511 /* dpcs are only serialised to a single processor */
512 KeSetTargetProcessorDpc(&xi->tx_dpc, 0);
513 //KeSetImportanceDpc(&xi->tx_dpc, HighImportance);
514 InitializeListHead(&xi->tx_waiting_pkt_list);
516 KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
517 xi->tx_shutting_down = FALSE;
518 xi->tx_outstanding = 0;
519 xi->tx_ring_free = NET_TX_RING_SIZE;
521 for (i = 0; i < TX_HEADER_BUFFERS / (PAGE_SIZE / TX_HEADER_BUFFER_SIZE); i++)
522 {
523 PVOID virtual;
524 NDIS_PHYSICAL_ADDRESS logical;
525 NdisMAllocateSharedMemory(xi->adapter_handle, PAGE_SIZE, TRUE, &virtual, &logical);
526 if (virtual == NULL)
527 continue;
528 //KdPrint((__DRIVER_NAME " Allocated SharedMemory at %p\n", virtual));
529 for (j = 0; j < PAGE_SIZE / TX_HEADER_BUFFER_SIZE; j++)
530 {
531 USHORT index = i * (PAGE_SIZE / TX_HEADER_BUFFER_SIZE) + j;
532 xi->tx_hbs[index].id = index;
533 xi->tx_hbs[index].virtual = (PUCHAR)virtual + j * TX_HEADER_BUFFER_SIZE;
534 xi->tx_hbs[index].logical.QuadPart = logical.QuadPart + j * TX_HEADER_BUFFER_SIZE;
535 put_hb_on_freelist(xi, &xi->tx_hbs[index]);
536 }
537 }
538 if (i == 0)
539 KdPrint((__DRIVER_NAME " Unable to allocate any SharedMemory buffers\n"));
541 xi->tx_id_free = 0;
542 for (i = 0; i < NET_TX_RING_SIZE; i++)
543 {
544 put_id_on_freelist(xi, i);
545 }
547 return TRUE;
548 }
/*
The ring is completely closed down now. We just need to empty anything left
on our freelists and harvest anything left on the rings.
*/

/*
 * Tear down the tx path: fail everything still waiting in the queue, wait
 * for in-flight sends to be completed by the GC DPC, then release the
 * shared-memory header-buffer pages.  Always returns TRUE.
 */
BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  //PMDL mdl;
  //ULONG i;
  KIRQL OldIrql;
  shared_buffer_t *hb;

  FUNCTION_ENTER();

  ASSERT(!xi->connected);

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);

  /* from here on, XenNet_TxBufferGC signals tx_idle_event when the last send completes */
  xi->tx_shutting_down = TRUE;

  /* Free packets in tx queue */
  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  /* wait (without the lock, so the GC DPC can run) for in-flight sends to drain */
  while (xi->tx_outstanding)
  {
    KdPrint((__DRIVER_NAME " Waiting for all packets to be sent\n"));
    KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, NULL);
  }

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);

  while((hb = get_hb_from_freelist(xi)) != NULL)
  {
    /* only free the actual buffers which were aligned on a page boundary */
    if ((PtrToUlong(hb->virtual) & (PAGE_SIZE - 1)) == 0)
      NdisMFreeSharedMemory(xi->adapter_handle, PAGE_SIZE, TRUE, hb->virtual, hb->logical);
  }

  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  FUNCTION_EXIT();

  return TRUE;
}