win-pvdrivers

view xennet/xennet_tx.c @ 783:644e5ddb1b47

Handle SCSI INQUIRY command better in xenvbd
author James Harper <james.harper@bendigoit.com.au>
date Mon Feb 15 20:53:57 2010 +1100 (2010-02-15)
parents 6304eb6bb690
children 467005e7f509
/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "xennet.h"

/* Not really necessary but keeps PREfast happy */
static KDEFERRED_ROUTINE XenNet_TxBufferGC;
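
/* the tx id freelist is a simple stack in tx_id_list; the ids index the
   tx_shadows[] table */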
static USHORT
get_id_from_freelist(struct xennet_info *xi)
{
  ASSERT(xi->tx_id_free);
  xi->tx_id_free--;

  return xi->tx_id_list[xi->tx_id_free];
}

static VOID
put_id_on_freelist(struct xennet_info *xi, USHORT id)
{
  xi->tx_id_list[xi->tx_id_free] = id;
  xi->tx_id_free++;
}

#define SWAP_USHORT(x) (USHORT)((((x & 0xFF) << 8)|((x >> 8) & 0xFF)))
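
/* Place a coalesce buffer (a page from the tx lookaside list) on the ring:
   takes a free id, records the buffer and grant in the shadow entry, and
   grants the backend access to the buffer's page. */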
static __forceinline struct netif_tx_request *
XenNet_PutCbOnRing(struct xennet_info *xi, PVOID coalesce_buf, ULONG length, grant_ref_t gref)
{
  struct netif_tx_request *tx;
  tx = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
  xi->tx.req_prod_pvt++;
  xi->tx_ring_free--;
  tx->id = get_id_from_freelist(xi);
  ASSERT(xi->tx_shadows[tx->id].gref == INVALID_GRANT_REF);
  ASSERT(!xi->tx_shadows[tx->id].cb);
  xi->tx_shadows[tx->id].cb = coalesce_buf;
  tx->gref = xi->vectors.GntTbl_GrantAccess(xi->vectors.context, 0, (ULONG)(MmGetPhysicalAddress(coalesce_buf).QuadPart >> PAGE_SHIFT), FALSE, gref);
  xi->tx_shadows[tx->id].gref = tx->gref;
  tx->offset = 0;
  tx->size = (USHORT)length;
  ASSERT(tx->offset + tx->size <= PAGE_SIZE);
  ASSERT(tx->size);
  return tx;
}

/* Called at DISPATCH_LEVEL with tx_lock held */
/*
 * Send one NDIS_PACKET. This may involve multiple entries on TX ring.
 */
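/*
 * Overview: work out how many ring slots the packet needs (coalescing the
 * tail if the backend's scatter-gather limit would be exceeded), check for
 * ring space, decide checksum/LSO flags, then place the requests (A), (B)
 * and (C) described further down.
 */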
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet)
{
  struct netif_tx_request *tx0 = NULL;
  struct netif_tx_request *txN = NULL;
  struct netif_extra_info *ei = NULL;
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  ULONG mss = 0;
  uint16_t flags = NETTXF_more_data;
  packet_info_t pi;
  BOOLEAN ndis_lso = FALSE;
  BOOLEAN xen_gso = FALSE;
  ULONG remaining;
  ULONG parse_result;
  ULONG frags = 0;
  BOOLEAN coalesce_required = FALSE;
  PVOID coalesce_buf;
  ULONG coalesce_remaining = 0;
  grant_ref_t gref;
  ULONG tx_length = 0;

  //FUNCTION_ENTER();

  gref = xi->vectors.GntTbl_GetRef(xi->vectors.context);
  if (gref == INVALID_GRANT_REF)
  {
    KdPrint((__DRIVER_NAME " out of grefs\n"));
    return FALSE;
  }
  coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
  if (!coalesce_buf)
  {
    xi->vectors.GntTbl_PutRef(xi->vectors.context, gref);
    KdPrint((__DRIVER_NAME " out of memory\n"));
    return FALSE;
  }
  XenNet_ClearPacketInfo(&pi);
  NdisQueryPacket(packet, NULL, (PUINT)&pi.mdl_count, &pi.first_buffer, (PUINT)&pi.total_length);

  pi.curr_mdl_offset = 0;
  pi.curr_buffer = pi.first_buffer;
  remaining = min(pi.total_length, PAGE_SIZE);
  while (remaining) /* this much gets put in the header */
  {
    ULONG length = XenNet_QueryData(&pi, remaining);
    remaining -= length;
    XenNet_EatData(&pi, length);
  }
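  /* the header bytes consumed above occupy the first ring slot */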
  frags++;
  if (pi.total_length > PAGE_SIZE) /* these are the frags we care about */
  {
    remaining = pi.total_length - PAGE_SIZE;
    while (remaining)
    {
      ULONG length = XenNet_QueryData(&pi, PAGE_SIZE);
      if (length != 0)
      {
        frags++;
        if (frags > LINUX_MAX_SG_ELEMENTS)
          break; /* worst case there could be hundreds of fragments - leave the loop now */
      }
      remaining -= length;
      XenNet_EatData(&pi, length);
    }
  }
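  /* more fragments than the Linux backend will accept - the tail of the packet will be coalesced into page-sized copies */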
  if (frags > LINUX_MAX_SG_ELEMENTS)
  {
    frags = LINUX_MAX_SG_ELEMENTS;
    coalesce_required = TRUE;
  }

  /* if we have enough space on the ring then we have enough id's so no need to check for that */
  if (xi->tx_ring_free < frags + 1)
  {
    xi->vectors.GntTbl_PutRef(xi->vectors.context, gref);
    NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
    //KdPrint((__DRIVER_NAME " Full on send - ring full\n"));
    return FALSE;
  }

  parse_result = XenNet_ParsePacketHeader(&pi, coalesce_buf, PAGE_SIZE);
  remaining = pi.total_length - pi.header_length;

  if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP)
  {
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    if (csum_info->Transmit.NdisPacketChecksumV4)
    {
      if (csum_info->Transmit.NdisPacketIpChecksum && !xi->setting_csum.V4Transmit.IpChecksum)
      {
        KdPrint((__DRIVER_NAME " IpChecksum not enabled\n"));
        //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
        //return TRUE;
      }
      if (csum_info->Transmit.NdisPacketTcpChecksum)
      {
        if (xi->setting_csum.V4Transmit.TcpChecksum)
        {
          flags |= NETTXF_csum_blank | NETTXF_data_validated;
        }
        else
        {
          KdPrint((__DRIVER_NAME " TcpChecksum not enabled\n"));
          //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
          //return TRUE;
        }
      }
      else if (csum_info->Transmit.NdisPacketUdpChecksum)
      {
        if (xi->setting_csum.V4Transmit.UdpChecksum)
        {
          flags |= NETTXF_csum_blank | NETTXF_data_validated;
        }
        else
        {
          KdPrint((__DRIVER_NAME " UdpChecksum not enabled\n"));
          //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
          //return TRUE;
        }
      }
    }
    else if (csum_info->Transmit.NdisPacketChecksumV6)
    {
      KdPrint((__DRIVER_NAME " NdisPacketChecksumV6 not supported\n"));
      //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
      //return TRUE;
    }
  }

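  /* a non-zero TcpLargeSendPacketInfo value is the MSS NDIS is requesting for large send offload */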
  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));

  if (mss && parse_result == PARSE_OK)
  {
    if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) != NDIS_PROTOCOL_ID_TCP_IP)
    {
      KdPrint((__DRIVER_NAME " mss specified when packet is not NDIS_PROTOCOL_ID_TCP_IP\n"));
    }
    ndis_lso = TRUE;
    if (mss > xi->setting_max_offload)
    {
      KdPrint((__DRIVER_NAME " Requested MSS (%d) larger than allowed MSS (%d)\n", mss, xi->setting_max_offload));
      //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
      //FUNCTION_EXIT();
      return TRUE;
    }
  }

  if (ndis_lso)
  {
    flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used */
    if (pi.tcp_length >= mss)
    {
      flags |= NETTXF_extra_info;
      xen_gso = TRUE;
    }
    else
    {
      KdPrint((__DRIVER_NAME " large send specified when tcp_length < mss\n"));
    }
  }

  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */

  /* (A) */
  tx0 = XenNet_PutCbOnRing(xi, coalesce_buf, pi.header_length, gref);
  ASSERT(tx0); /* this will never happen */
  tx0->flags = flags;
  tx_length += pi.header_length;

  /* even though we haven't reported that we are capable of it, LSO demands that we calculate the IP Header checksum */
  if (ndis_lso)
  {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
  txN = tx0;

  /* (B) */
  if (xen_gso)
  {
    ASSERT(flags & NETTXF_extra_info);
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
    //KdPrint((__DRIVER_NAME " pos = %d\n", xi->tx.req_prod_pvt));
    xi->tx.req_prod_pvt++;
    xi->tx_ring_free--;
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT)mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;
  }
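
  /* the extra slot above carries no id of its own; its response arrives with status NETIF_RSP_NULL */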
  ASSERT(xi->config_sg || !remaining);

  /* (C) - only if data is remaining */
  coalesce_buf = NULL;
  while (remaining > 0)
  {
    ULONG length;
    PFN_NUMBER pfn;

    ASSERT(pi.curr_buffer);
    if (coalesce_required)
    {
      PVOID va;
      if (!coalesce_buf)
      {
        gref = xi->vectors.GntTbl_GetRef(xi->vectors.context);
        if (gref == INVALID_GRANT_REF)
        {
          KdPrint((__DRIVER_NAME " out of grefs - partial send\n"));
          break;
        }
        coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
        if (!coalesce_buf)
        {
          xi->vectors.GntTbl_PutRef(xi->vectors.context, gref);
          KdPrint((__DRIVER_NAME " out of memory - partial send\n"));
          break;
        }
        coalesce_remaining = min(PAGE_SIZE, remaining);
      }
      length = XenNet_QueryData(&pi, coalesce_remaining);
      va = NdisBufferVirtualAddressSafe(pi.curr_buffer, LowPagePriority);
      if (!va)
      {
        KdPrint((__DRIVER_NAME " failed to map buffer va - partial send\n"));
        /* can't map the source buffer; drop the grant ref and the partially
           filled coalesce buffer and finish with a partial send, as the
           other failure paths in this loop do (leaving the freed buffer
           pointer live here would put a stale cb on the ring) */
        xi->vectors.GntTbl_PutRef(xi->vectors.context, gref);
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
        coalesce_buf = NULL;
        break;
      }
      else
      {
        memcpy((PUCHAR)coalesce_buf + min(PAGE_SIZE, remaining) - coalesce_remaining, (PUCHAR)va + pi.curr_mdl_offset, length);
        coalesce_remaining -= length;
      }
    }
    else
    {
      length = XenNet_QueryData(&pi, PAGE_SIZE);
    }
    if (!length || coalesce_remaining) /* sometimes there are zero length buffers... */
    {
      XenNet_EatData(&pi, length); /* do this so we actually move to the next buffer */
      continue;
    }

    if (coalesce_buf)
    {
      if (remaining)
      {
        ULONG cb_length = min(PAGE_SIZE, remaining);
        txN = XenNet_PutCbOnRing(xi, coalesce_buf, cb_length, gref);
        ASSERT(txN);
        coalesce_buf = NULL;
        tx_length += cb_length; /* account for the length before reducing 'remaining', so the LSO byte count reported back to NDIS is not short */
        remaining -= cb_length;
      }
    }
    else
    {
      ULONG offset;

      gref = xi->vectors.GntTbl_GetRef(xi->vectors.context);
      if (gref == INVALID_GRANT_REF)
      {
        KdPrint((__DRIVER_NAME " out of grefs - partial send\n"));
        break;
      }
      txN = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
      xi->tx.req_prod_pvt++;
      xi->tx_ring_free--;
      txN->id = get_id_from_freelist(xi);
      ASSERT(!xi->tx_shadows[txN->id].cb);
      offset = MmGetMdlByteOffset(pi.curr_buffer) + pi.curr_mdl_offset;
      pfn = MmGetMdlPfnArray(pi.curr_buffer)[offset >> PAGE_SHIFT];
      txN->offset = (USHORT)offset & (PAGE_SIZE - 1);
      txN->gref = xi->vectors.GntTbl_GrantAccess(xi->vectors.context, 0, (ULONG)pfn, FALSE, gref);
      ASSERT(xi->tx_shadows[txN->id].gref == INVALID_GRANT_REF);
      xi->tx_shadows[txN->id].gref = txN->gref;
      //ASSERT(sg->Elements[sg_element].Length > sg_offset);
      txN->size = (USHORT)length;
      ASSERT(txN->offset + txN->size <= PAGE_SIZE);
      ASSERT(txN->size);
      ASSERT(txN->gref != INVALID_GRANT_REF);
      remaining -= length;
      tx_length += length;
    }
    tx0->size = tx0->size + txN->size;
    txN->flags = NETTXF_more_data;
    XenNet_EatData(&pi, length);
  }
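  /* the final request must not advertise more data */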
  txN->flags &= ~NETTXF_more_data;
  ASSERT(tx0->size == pi.total_length);
  ASSERT(!xi->tx_shadows[txN->id].packet);
  xi->tx_shadows[txN->id].packet = packet;

  if (ndis_lso)
  {
    //KdPrint((__DRIVER_NAME " TcpLargeSendPacketInfo = %d\n", pi.tcp_length));
    NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length);
  }

  xi->stat_tx_ok++;

  //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
  //FUNCTION_EXIT();
  xi->tx_outstanding++;
  return TRUE;
}

/* Called at DISPATCH_LEVEL with tx_lock held */

static VOID
XenNet_SendQueuedPackets(struct xennet_info *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  int notify;

  //FUNCTION_ENTER();

  if (xi->device_state->suspend_resume_state_pdo != SR_STATE_RUNNING)
    return;

  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  /* if the list is empty, RemoveHeadList returns a pointer to the list head itself, not NULL */
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    if (!XenNet_HWSendPacket(xi, packet))
    {
      //KdPrint((__DRIVER_NAME " No room for packet\n"));
      InsertHeadList(&xi->tx_waiting_pkt_list, entry);
      break;
    }
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx, notify);
  if (notify)
  {
    xi->vectors.EvtChn_Notify(xi->vectors.context, xi->event_channel);
  }
  //FUNCTION_EXIT();
}

//ULONG packets_outstanding = 0;
// Called at DISPATCH_LEVEL
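// Harvests completed tx responses: frees coalesce buffers, ends grant
// access, returns ids to the freelist and completes the finished packets.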
static VOID
XenNet_TxBufferGC(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2)
{
  struct xennet_info *xi = context;
  RING_IDX cons, prod;
  PNDIS_PACKET head = NULL, tail = NULL;
  PNDIS_PACKET packet;
  ULONG tx_packets = 0;

  UNREFERENCED_PARAMETER(dpc);
  UNREFERENCED_PARAMETER(arg1);
  UNREFERENCED_PARAMETER(arg2);

  //FUNCTION_ENTER();

  ASSERT(xi->connected);
  ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  if (xi->tx_shutting_down && !xi->tx_outstanding)
  {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
    KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
    return;
  }

  do {
    prod = xi->tx.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;
      tx_shadow_t *shadow;

      txrsp = RING_GET_RESPONSE(&xi->tx, cons);

      xi->tx_ring_free++;

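      /* NETIF_RSP_NULL marks the response slot paired with an extra_info request - nothing to clean up */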
      if (txrsp->status == NETIF_RSP_NULL)
      {
        continue;
      }

      shadow = &xi->tx_shadows[txrsp->id];
      if (shadow->cb)
      {
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, shadow->cb);
        shadow->cb = NULL;
      }

      if (shadow->gref != INVALID_GRANT_REF)
      {
        xi->vectors.GntTbl_EndAccess(xi->vectors.context,
          shadow->gref, FALSE);
        shadow->gref = INVALID_GRANT_REF;
      }

      if (shadow->packet)
      {
        packet = shadow->packet;
        *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
        if (head)
          *(PNDIS_PACKET *)&tail->MiniportReservedEx[0] = packet;
        else
          head = packet;
        tail = packet;
        shadow->packet = NULL;
      }
      put_id_on_freelist(xi, txrsp->id);
    }

    xi->tx.rsp_cons = prod;
    /* resist the temptation to set the event more than +1... it breaks things */
    xi->tx.sring->rsp_event = prod + 1;
    KeMemoryBarrier();
  } while (prod != xi->tx.sring->rsp_prod);

  /* if queued packets, send them now */
  if (!xi->tx_shutting_down)
    XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  /* must be done without holding any locks */
  while (head)
  {
    packet = (PNDIS_PACKET)head;
    head = *(PNDIS_PACKET *)&packet->MiniportReservedEx[0];
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
    tx_packets++;
  }

  /* must be done after we have truly given back all packets */
  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
  xi->tx_outstanding -= tx_packets;
  if (!xi->tx_outstanding && xi->tx_shutting_down)
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  if (xi->device_state->suspend_resume_state_pdo == SR_STATE_SUSPENDING
    && xi->device_state->suspend_resume_state_fdo != SR_STATE_SUSPENDING
    && xi->tx_id_free == NET_TX_RING_SIZE)
  {
    KdPrint((__DRIVER_NAME " Setting SR_STATE_SUSPENDING\n"));
    xi->device_state->suspend_resume_state_fdo = SR_STATE_SUSPENDING;
    KdPrint((__DRIVER_NAME " Notifying event channel %d\n", xi->device_state->pdo_event_channel));
    xi->vectors.EvtChn_Notify(xi->vectors.context, xi->device_state->pdo_event_channel);
  }

  //FUNCTION_EXIT();
}

// called at <= DISPATCH_LEVEL
VOID DDKAPI
XenNet_SendPackets(
  IN NDIS_HANDLE MiniportAdapterContext,
  IN PPNDIS_PACKET PacketArray,
  IN UINT NumberOfPackets
  )
{
  struct xennet_info *xi = MiniportAdapterContext;
  PNDIS_PACKET packet;
  UINT i;
  PLIST_ENTRY entry;
  KIRQL OldIrql;

  //FUNCTION_ENTER();

  if (xi->inactive)
  {
    for (i = 0; i < NumberOfPackets; i++)
    {
      NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
    }
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);

  for (i = 0; i < NumberOfPackets; i++)
  {
    packet = PacketArray[i];
    ASSERT(packet);
    *(ULONG *)&packet->MiniportReservedEx = 0;
    entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
    InsertTailList(&xi->tx_waiting_pkt_list, entry);
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  //FUNCTION_EXIT();
}
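
/* Complete any still-queued packets whose cancel id matches with
   NDIS_STATUS_REQUEST_ABORTED. Packets already on the ring complete
   normally via XenNet_TxBufferGC. */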
VOID
XenNet_CancelSendPackets(
  NDIS_HANDLE MiniportAdapterContext,
  PVOID CancelId)
{
  struct xennet_info *xi = MiniportAdapterContext;
  KIRQL old_irql;
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  PNDIS_PACKET head = NULL, tail = NULL;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  entry = xi->tx_waiting_pkt_list.Flink;
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    entry = entry->Flink;
    if (NDIS_GET_PACKET_CANCEL_ID(packet) == CancelId)
    {
      KdPrint((__DRIVER_NAME " Found packet to cancel %p\n", packet));
      /* RemoveEntryList returns TRUE when the list becomes empty, not success, so its return value is not a useful check here */
      RemoveEntryList((PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)]);
      *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
      if (head)
        *(PNDIS_PACKET *)&tail->MiniportReservedEx[0] = packet;
      else
        head = packet;
      tail = packet;
    }
  }

  KeReleaseSpinLock(&xi->tx_lock, old_irql);

  while (head)
  {
    packet = (PNDIS_PACKET)head;
    head = *(PNDIS_PACKET *)&packet->MiniportReservedEx[0];
    KdPrint((__DRIVER_NAME " NdisMSendComplete(%p)\n", packet));
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_REQUEST_ABORTED);
  }

  FUNCTION_EXIT();
}

VOID
XenNet_TxResumeStart(xennet_info_t *xi)
{
  UNREFERENCED_PARAMETER(xi);

  FUNCTION_ENTER();
  /* nothing to do here - all packets were already sent */
  FUNCTION_EXIT();
}

VOID
XenNet_TxResumeEnd(xennet_info_t *xi)
{
  KIRQL old_irql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);
  XenNet_SendQueuedPackets(xi);
  KeReleaseSpinLock(&xi->tx_lock, old_irql);

  FUNCTION_EXIT();
}
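
/* One-time initialisation of the tx path: lock, DPC, waiting-packet list,
   id freelist, shadow table, and the lookaside list used for coalesce
   buffers. */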
BOOLEAN
XenNet_TxInit(xennet_info_t *xi)
{
  USHORT i;

  KeInitializeSpinLock(&xi->tx_lock);
  KeInitializeDpc(&xi->tx_dpc, XenNet_TxBufferGC, xi);
  /* dpcs are only serialised to a single processor */
  KeSetTargetProcessorDpc(&xi->tx_dpc, 0);
  //KeSetImportanceDpc(&xi->tx_dpc, HighImportance);
  InitializeListHead(&xi->tx_waiting_pkt_list);

  KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
  xi->tx_shutting_down = FALSE;
  xi->tx_outstanding = 0;
  xi->tx_ring_free = NET_TX_RING_SIZE;

  NdisInitializeNPagedLookasideList(&xi->tx_lookaside_list, NULL, NULL, 0,
    PAGE_SIZE, XENNET_POOL_TAG, 0);

  xi->tx_id_free = 0;
  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    xi->tx_shadows[i].gref = INVALID_GRANT_REF;
    xi->tx_shadows[i].cb = NULL;
    put_id_on_freelist(xi, i);
  }

  return TRUE;
}

/*
The ring is completely closed down now. We just need to empty anything left
on our freelists and harvest anything left on the rings.
*/

BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  //PMDL mdl;
  //ULONG i;
  KIRQL OldIrql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);
  xi->tx_shutting_down = TRUE;
  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  while (xi->tx_outstanding)
  {
    KdPrint((__DRIVER_NAME " Waiting for %d remaining packets to be sent\n", xi->tx_outstanding));
    KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, NULL);
  }

  KeRemoveQueueDpc(&xi->tx_dpc);
  KeFlushQueuedDpcs();

  /* Free packets in tx queue */
  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  NdisDeleteNPagedLookasideList(&xi->tx_lookaside_list);

  FUNCTION_EXIT();

  return TRUE;
}