win-pvdrivers

xennet/xennet_tx.c @ 1079:a60d401aa020

Tidy up asserts in xennet. Add a few new ones.
author James Harper <james.harper@bendigoit.com.au>
date Mon Dec 09 13:33:26 2013 +1100 (2013-12-09)
parents 83201dc2ea3f
children c94174bbf195
line source
/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "xennet.h"
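
/* The tx id freelist: tx_id_list holds the request ids that are not
   currently in use, with tx_id_free counting how many are available.
   Each id indexes tx_shadows[], which remembers the grant ref, coalesce
   buffer and packet associated with the corresponding in-flight request. */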
static USHORT
get_id_from_freelist(struct xennet_info *xi)
{
  XN_ASSERT(xi->tx_id_free);
  xi->tx_id_free--;

  return xi->tx_id_list[xi->tx_id_free];
}

static VOID
put_id_on_freelist(struct xennet_info *xi, USHORT id)
{
  xi->tx_id_list[xi->tx_id_free] = id;
  xi->tx_id_free++;
}

#define SWAP_USHORT(x) (USHORT)((((x & 0xFF) << 8)|((x >> 8) & 0xFF)))
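
/* Claim the next free slot on the tx ring and point it at a coalesce
   buffer: the buffer's page is granted to the backend, and the grant ref
   and buffer pointer are stored in the shadow entry so they can be
   released when the response is harvested in XenNet_TxBufferGC. */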
static __forceinline struct netif_tx_request *
XenNet_PutCbOnRing(struct xennet_info *xi, PVOID coalesce_buf, ULONG length, grant_ref_t gref)
{
  struct netif_tx_request *tx;
  tx = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
  xi->tx_ring.req_prod_pvt++;
  XN_ASSERT(xi->tx_ring_free);
  xi->tx_ring_free--;
  tx->id = get_id_from_freelist(xi);
  XN_ASSERT(xi->tx_shadows[tx->id].gref == INVALID_GRANT_REF);
  XN_ASSERT(!xi->tx_shadows[tx->id].cb);
  xi->tx_shadows[tx->id].cb = coalesce_buf;
  tx->gref = XnGrantAccess(xi->handle, (ULONG)(MmGetPhysicalAddress(coalesce_buf).QuadPart >> PAGE_SHIFT), FALSE, gref, (ULONG)'XNTX');
  xi->tx_shadows[tx->id].gref = tx->gref;
  tx->offset = 0;
  tx->size = (USHORT)length;
  XN_ASSERT(tx->offset + tx->size <= PAGE_SIZE);
  XN_ASSERT(tx->size);
  return tx;
}
#if 0
static VOID dump_packet_data(PNDIS_PACKET packet, PCHAR header) {
  UINT mdl_count;
  PMDL first_mdl;
  UINT total_length;

  NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &first_mdl, (PUINT)&total_length);
  FUNCTION_MSG("%s mdl_count = %d, first_mdl = %p, total_length = %d\n", header, mdl_count, first_mdl, total_length);
}
#endif
/* Called at DISPATCH_LEVEL with tx_lock held */
/*
 * Send one NDIS_PACKET. This may involve multiple entries on TX ring.
 */
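/* Outline: allocate a grant and a coalesce buffer for the headers, walk
   the MDL chain to count how many ring slots the packet needs, bail out
   (so the caller can requeue) if the ring does not have room, then emit
   the requests: the first carries the parsed headers and checksum flags,
   an optional extra_info slot requests GSO, and the remainder map or
   copy the payload. */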
#if NTDDI_VERSION < NTDDI_VISTA
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet) {
#else
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNET_BUFFER packet) {
#endif
  struct netif_tx_request *tx0 = NULL;
  struct netif_tx_request *txN = NULL;
  struct netif_extra_info *ei = NULL;
  ULONG mss = 0;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  UINT mdl_count;
#else
  NDIS_TCP_LARGE_SEND_OFFLOAD_NET_BUFFER_LIST_INFO lso_info;
  NDIS_TCP_IP_CHECKSUM_NET_BUFFER_LIST_INFO csum_info;
#endif
  uint16_t flags = NETTXF_more_data;
  packet_info_t pi;
  BOOLEAN ndis_lso = FALSE;
  BOOLEAN xen_gso = FALSE;
  ULONG remaining;
  ULONG frags = 0;
  BOOLEAN coalesce_required = FALSE;
  PVOID coalesce_buf;
  ULONG coalesce_remaining = 0;
  grant_ref_t gref;
  ULONG tx_length = 0;

  gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
  if (gref == INVALID_GRANT_REF)
  {
    FUNCTION_MSG("out of grefs\n");
    return FALSE;
  }
  coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
  if (!coalesce_buf)
  {
    XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
    FUNCTION_MSG("out of memory\n");
    return FALSE;
  }
  XenNet_ClearPacketInfo(&pi);
#if NTDDI_VERSION < NTDDI_VISTA
  NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &pi.first_mdl, (PUINT)&pi.total_length);
  pi.curr_mdl = pi.first_mdl;
#else
  /* create a new MDL over the data portion of the first MDL in the packet... it's just easier this way */
  IoBuildPartialMdl(packet->CurrentMdl,
    &pi.first_mdl_storage,
    (PUCHAR)MmGetMdlVirtualAddress(packet->CurrentMdl) + packet->CurrentMdlOffset,
    MmGetMdlByteCount(packet->CurrentMdl) - packet->CurrentMdlOffset);
  pi.total_length = packet->DataLength;
  pi.first_mdl_storage.Next = packet->CurrentMdl->Next;
  pi.first_mdl = pi.curr_mdl = &pi.first_mdl_storage;
#endif
  pi.first_mdl_offset = pi.curr_mdl_offset = 0;
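  /* Count the ring slots (frags) this packet will occupy: the first
     PAGE_SIZE of data goes into the coalesce/header buffer, and the rest
     is counted page by page. If the count exceeds LINUX_MAX_SG_ELEMENTS
     the tail of the packet will be copied into coalesce buffers instead
     of being granted directly. */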
  remaining = min(pi.total_length, PAGE_SIZE);
  while (remaining) { /* this much gets put in the header */
    ULONG length = XenNet_QueryData(&pi, remaining);
    remaining -= length;
    XenNet_EatData(&pi, length);
  }
  frags++;
  if (pi.total_length > PAGE_SIZE) { /* these are the frags we care about */
    remaining = pi.total_length - PAGE_SIZE;
    while (remaining) {
      ULONG length = XenNet_QueryData(&pi, PAGE_SIZE);
      if (length != 0) {
        frags++;
        if (frags > LINUX_MAX_SG_ELEMENTS)
          break; /* worst case there could be hundreds of fragments - leave the loop now */
      }
      remaining -= length;
      XenNet_EatData(&pi, length);
    }
  }
  if (frags > LINUX_MAX_SG_ELEMENTS) {
    frags = LINUX_MAX_SG_ELEMENTS;
    coalesce_required = TRUE;
  }

  /* if we have enough space on the ring then we have enough id's so no need to check for that */
  if (xi->tx_ring_free < frags + 1) {
    XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
    NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
    //FUNCTION_MSG("Full on send - ring full\n");
    return FALSE;
  }
  XenNet_ParsePacketHeader(&pi, coalesce_buf, PAGE_SIZE);
  remaining = pi.total_length - pi.header_length;
  if (pi.ip_version == 4 && pi.ip_proto == 6 && pi.ip4_length == 0) {
    *((PUSHORT)(pi.header + 0x10)) = GET_NET_USHORT((USHORT)pi.total_length - XN_HDR_SIZE);
  }

#if NTDDI_VERSION < NTDDI_VISTA
  if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP) {
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    if (csum_info->Transmit.NdisPacketChecksumV4) {
      if (csum_info->Transmit.NdisPacketTcpChecksum) {
        flags |= NETTXF_csum_blank | NETTXF_data_validated;
      } else if (csum_info->Transmit.NdisPacketUdpChecksum) {
        flags |= NETTXF_csum_blank | NETTXF_data_validated;
      }
    }
  }
#else
  csum_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpIpChecksumNetBufferListInfo);
  if (csum_info.Transmit.IsIPv4) {
    if (csum_info.Transmit.TcpChecksum) {
      flags |= NETTXF_csum_blank | NETTXF_data_validated;
    } else if (csum_info.Transmit.UdpChecksum) {
      flags |= NETTXF_csum_blank | NETTXF_data_validated;
    }
  } else if (csum_info.Transmit.IsIPv6) {
    FUNCTION_MSG("Transmit.IsIPv6 not supported\n");
  }
#endif

#if NTDDI_VERSION < NTDDI_VISTA
  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));
#else
  lso_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpLargeSendNetBufferListInfo);
  switch (lso_info.Transmit.Type) {
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
    mss = lso_info.LsoV1Transmit.MSS;
    /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
    break;
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
    mss = lso_info.LsoV2Transmit.MSS;
    /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
    break;
  }
#endif
  if (mss && pi.parse_result == PARSE_OK) {
    ndis_lso = TRUE;
  }
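
  /* For LSO the backend expects the TCP pseudo-header checksum to be
     calculated without the TCP length, so subtract it back out of the
     value NDIS supplied and fold the result into 16 bits. GSO is only
     requested via an extra_info slot when the TCP payload is at least
     one full MSS. */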
  if (ndis_lso) {
    ULONG csum;
    flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used */
    if (pi.tcp_length >= mss) {
      flags |= NETTXF_extra_info;
      xen_gso = TRUE;
    }
    /* Adjust pseudoheader checksum to be what Linux expects (remove the tcp_length) */
    csum = ~RtlUshortByteSwap(*(PUSHORT)&pi.header[XN_HDR_SIZE + pi.ip4_header_length + 16]);
    csum -= (pi.ip4_length - pi.ip4_header_length);
    while (csum & 0xFFFF0000)
      csum = (csum & 0xFFFF) + (csum >> 16);
    *(PUSHORT)&pi.header[XN_HDR_SIZE + pi.ip4_header_length + 16] = ~RtlUshortByteSwap((USHORT)csum);
  }
  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */

  /* (A) */
  tx0 = XenNet_PutCbOnRing(xi, coalesce_buf, pi.header_length, gref);
  XN_ASSERT(tx0); /* this will never happen */
  tx0->flags = flags;
  tx_length += pi.header_length;

  /* lso implies IpHeaderChecksum */
#if NTDDI_VERSION < NTDDI_VISTA
  if (ndis_lso) {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
#else
  if (ndis_lso || csum_info.Transmit.IpHeaderChecksum) {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
#endif
  txN = tx0;

  /* (B) */
  if (xen_gso) {
    XN_ASSERT(flags & NETTXF_extra_info);
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
    //KdPrint((__DRIVER_NAME " pos = %d\n", xi->tx_ring.req_prod_pvt));
    xi->tx_ring.req_prod_pvt++;
    XN_ASSERT(xi->tx_ring_free);
    xi->tx_ring_free--;
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT)mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;
  }

  XN_ASSERT(xi->current_sg_supported || !remaining);

  /* (C) - only if data is remaining */
  coalesce_buf = NULL;
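  /* Each remaining chunk is either copied into a fresh coalesce buffer
     (when the packet had too many fragments to fit on the ring) or
     granted to the backend directly out of the MDL page it lives in.
     Running out of grants or memory here results in a partial send. */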
  while (remaining > 0) {
    ULONG length;
    PFN_NUMBER pfn;

    XN_ASSERT(pi.curr_mdl);
    if (coalesce_required) {
      PVOID va;
      if (!coalesce_buf) {
        gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
        if (gref == INVALID_GRANT_REF) {
          FUNCTION_MSG("out of grefs - partial send\n");
          break;
        }
        coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
        if (!coalesce_buf) {
          XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
          FUNCTION_MSG("out of memory - partial send\n");
          break;
        }
        coalesce_remaining = min(PAGE_SIZE, remaining);
      }
      length = XenNet_QueryData(&pi, coalesce_remaining);
      va = NdisBufferVirtualAddressSafe(pi.curr_mdl, LowPagePriority);
      if (!va) {
        FUNCTION_MSG("failed to map buffer va - partial send\n");
        coalesce_remaining = 0;
        remaining -= min(PAGE_SIZE, remaining);
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
      } else {
        memcpy((PUCHAR)coalesce_buf + min(PAGE_SIZE, remaining) - coalesce_remaining, (PUCHAR)va + pi.curr_mdl_offset, length);
        coalesce_remaining -= length;
      }
    } else {
      length = XenNet_QueryData(&pi, PAGE_SIZE);
    }
    if (!length || coalesce_remaining) { /* sometimes there are zero length buffers... */
      XenNet_EatData(&pi, length); /* do this so we actually move to the next buffer */
      continue;
    }

    if (coalesce_buf) {
      if (remaining) {
        txN = XenNet_PutCbOnRing(xi, coalesce_buf, min(PAGE_SIZE, remaining), gref);
        XN_ASSERT(txN);
        coalesce_buf = NULL;
        tx_length += min(PAGE_SIZE, remaining);
        remaining -= min(PAGE_SIZE, remaining);
      }
    } else {
      ULONG offset;

      gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
      if (gref == INVALID_GRANT_REF) {
        FUNCTION_MSG("out of grefs - partial send\n");
        break;
      }
      txN = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
      xi->tx_ring.req_prod_pvt++;
      XN_ASSERT(xi->tx_ring_free);
      xi->tx_ring_free--;
      txN->id = get_id_from_freelist(xi);
      XN_ASSERT(xi->tx_shadows[txN->id].gref == INVALID_GRANT_REF);
      XN_ASSERT(!xi->tx_shadows[txN->id].cb);
      offset = MmGetMdlByteOffset(pi.curr_mdl) + pi.curr_mdl_offset;
      pfn = MmGetMdlPfnArray(pi.curr_mdl)[offset >> PAGE_SHIFT];
      txN->offset = (USHORT)offset & (PAGE_SIZE - 1);
      txN->gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, gref, (ULONG)'XNTX');
      xi->tx_shadows[txN->id].gref = txN->gref;
      //ASSERT(sg->Elements[sg_element].Length > sg_offset);
      txN->size = (USHORT)length;
      XN_ASSERT(txN->offset + txN->size <= PAGE_SIZE);
      XN_ASSERT(txN->size);
      XN_ASSERT(txN->gref != INVALID_GRANT_REF);
      remaining -= length;
      tx_length += length;
    }
    tx0->size = tx0->size + txN->size;
    txN->flags = NETTXF_more_data;
    XenNet_EatData(&pi, length);
  }
  txN->flags &= ~NETTXF_more_data;
  XN_ASSERT(tx0->size == pi.total_length);
  XN_ASSERT(!xi->tx_shadows[txN->id].packet);
  xi->tx_shadows[txN->id].packet = packet;

#if NTDDI_VERSION < NTDDI_VISTA
  if (ndis_lso) {
    NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length);
  }
#else
  switch (lso_info.Transmit.Type) {
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
    lso_info.LsoV1TransmitComplete.TcpPayload = tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length;
    break;
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
    break;
  }
#endif

  xi->tx_outstanding++;
  return TRUE;
}
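
/* Drain the queue of waiting packets onto the ring, stopping at the first
   one that does not fit (XenNet_HWSendPacket returns FALSE and the packet
   is pushed back to the head of the list), then notify the backend via
   the event channel if the ring protocol says a notification is needed. */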
/* Called at DISPATCH_LEVEL with tx_lock held */
static VOID
XenNet_SendQueuedPackets(struct xennet_info *xi)
{
  PLIST_ENTRY entry;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packet;
#else
  PNET_BUFFER packet;
#endif
  int notify;

  if (xi->device_state != DEVICE_STATE_ACTIVE)
    return;

  while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
#if NTDDI_VERSION < NTDDI_VISTA
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
#else
    packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
#endif
    if (!XenNet_HWSendPacket(xi, packet)) {
      InsertHeadList(&xi->tx_waiting_pkt_list, entry);
      break;
    }
  }

  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx_ring, notify);
  if (notify) {
    XnNotify(xi->handle, xi->event_channel);
  }
}
// Called at DISPATCH_LEVEL
VOID
XenNet_TxBufferGC(struct xennet_info *xi, BOOLEAN dont_set_event) {
  RING_IDX cons, prod;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET head = NULL, tail = NULL;
  PNDIS_PACKET packet;
#else
  PNET_BUFFER_LIST head = NULL;
  PNET_BUFFER_LIST tail = NULL;
  PNET_BUFFER_LIST nbl;
  PNET_BUFFER packet;
#endif
  ULONG tx_packets = 0;

  XN_ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
    KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
    return;
  }
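
  /* Walk the responses the backend has produced: for each one release the
     coalesce buffer and grant recorded in the shadow entry, and when the
     response completes the last request of a packet, queue the packet (or
     its NET_BUFFER_LIST) for send-complete and return the id to the
     freelist. The outer loop repeats until no new responses arrived while
     we were processing. */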
  do {
    prod = xi->tx_ring.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx_ring.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;
      tx_shadow_t *shadow;

      txrsp = RING_GET_RESPONSE(&xi->tx_ring, cons);

      xi->tx_ring_free++;

      if (txrsp->status == NETIF_RSP_NULL) {
        continue;
      }

      shadow = &xi->tx_shadows[txrsp->id];
      if (shadow->cb) {
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, shadow->cb);
        shadow->cb = NULL;
      }

      if (shadow->gref != INVALID_GRANT_REF) {
        XnEndAccess(xi->handle, shadow->gref, FALSE, (ULONG)'XNTX');
        shadow->gref = INVALID_GRANT_REF;
      }

      if (shadow->packet) {
        PMDL mdl;
        PUCHAR header;
        packet = shadow->packet;
#if NTDDI_VERSION < NTDDI_VISTA
        mdl = NDIS_PACKET_FIRST_NDIS_BUFFER(packet);
#else
        mdl = NET_BUFFER_CURRENT_MDL(packet);
#endif
#pragma warning(suppress:28193) /* already mapped so guaranteed to work */
        header = MmGetSystemAddressForMdlSafe(mdl, LowPagePriority);
#if NTDDI_VERSION < NTDDI_VISTA
#else
        header += NET_BUFFER_CURRENT_MDL_OFFSET(packet);
#endif
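
        /* Classify the completed frame for the ifHC* statistics: a set
           group bit (0x01) in the first byte of the destination MAC means
           multicast, all-FF means broadcast, and anything else (or a frame
           too short to tell) is counted as unicast. */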
#if NTDDI_VERSION < NTDDI_VISTA
#else
        xi->stats.ifHCOutOctets += packet->DataLength;
        if (packet->DataLength < XN_HDR_SIZE || !(header[0] & 0x01)) {
          /* unicast or tiny packet */
          xi->stats.ifHCOutUcastPkts++;
          xi->stats.ifHCOutUcastOctets += packet->DataLength;
        }
        else if (header[0] == 0xFF && header[1] == 0xFF && header[2] == 0xFF
          && header[3] == 0xFF && header[4] == 0xFF && header[5] == 0xFF) {
          /* broadcast */
          xi->stats.ifHCOutBroadcastPkts++;
          xi->stats.ifHCOutBroadcastOctets += packet->DataLength;
        } else {
          /* multicast */
          xi->stats.ifHCOutMulticastPkts++;
          xi->stats.ifHCOutMulticastOctets += packet->DataLength;
        }
#endif

#if NTDDI_VERSION < NTDDI_VISTA
        PACKET_NEXT_PACKET(packet) = NULL;
        if (!head) {
          head = packet;
        } else {
          PACKET_NEXT_PACKET(tail) = packet;
        }
        tail = packet;
#else
        nbl = NB_NBL(packet);
        NBL_REF(nbl)--;
        if (!NBL_REF(nbl)) {
          NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL;
          if (head) {
            NET_BUFFER_LIST_NEXT_NBL(tail) = nbl;
            tail = nbl;
          } else {
            head = nbl;
            tail = nbl;
          }
        }
#endif
        shadow->packet = NULL;
        tx_packets++;
      }
      XN_ASSERT(xi->tx_shadows[txrsp->id].gref == INVALID_GRANT_REF);
      XN_ASSERT(!xi->tx_shadows[txrsp->id].cb);
      put_id_on_freelist(xi, txrsp->id);
    }
    xi->tx_ring.rsp_cons = prod;
    /* resist the temptation to set the event more than +1... it breaks things */
    if (!dont_set_event)
      xi->tx_ring.sring->rsp_event = prod + 1;
    KeMemoryBarrier();
  } while (prod != xi->tx_ring.sring->rsp_prod);

  /* if queued packets, send them now */
  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  /* must be done without holding any locks */
#if NTDDI_VERSION < NTDDI_VISTA
  while (head) {
    packet = (PNDIS_PACKET)head;
    head = PACKET_NEXT_PACKET(packet);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
  }
#else
  if (head)
    NdisMSendNetBufferListsComplete(xi->adapter_handle, head, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
#endif

  /* must be done after we have truly given back all packets */
  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
  xi->tx_outstanding -= tx_packets;
  if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
  }
  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
}
#if NTDDI_VERSION < NTDDI_VISTA
VOID
XenNet_SendPackets(NDIS_HANDLE MiniportAdapterContext, PPNDIS_PACKET PacketArray, UINT NumberOfPackets) {
  struct xennet_info *xi = MiniportAdapterContext;
  PNDIS_PACKET packet;
  UINT i;
  PLIST_ENTRY entry;
  KIRQL old_irql;

  if (xi->device_state != DEVICE_STATE_ACTIVE) {
    for (i = 0; i < NumberOfPackets; i++) {
      NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
    }
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  for (i = 0; i < NumberOfPackets; i++) {
    packet = PacketArray[i];
    XN_ASSERT(packet);
    entry = &PACKET_LIST_ENTRY(packet);
    InsertTailList(&xi->tx_waiting_pkt_list, entry);
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, old_irql);
}
#else
// called at <= DISPATCH_LEVEL
VOID
XenNet_SendNetBufferLists(
  NDIS_HANDLE adapter_context,
  PNET_BUFFER_LIST nb_lists,
  NDIS_PORT_NUMBER port_number,
  ULONG send_flags) {
  struct xennet_info *xi = adapter_context;
  PLIST_ENTRY nb_entry;
  KIRQL old_irql;
  PNET_BUFFER_LIST curr_nbl;
  PNET_BUFFER_LIST next_nbl;

  UNREFERENCED_PARAMETER(port_number);

  if (xi->device_state == DEVICE_STATE_INACTIVE) {
    curr_nbl = nb_lists;
    for (curr_nbl = nb_lists; curr_nbl; curr_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl)) {
      curr_nbl->Status = NDIS_STATUS_FAILURE;
    }
    /* this actions the whole list */
    NdisMSendNetBufferListsComplete(xi->adapter_handle, nb_lists, (send_flags & NDIS_SEND_FLAGS_DISPATCH_LEVEL)?NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL:0);
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);
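  /* Each NET_BUFFER is queued for transmit individually; NBL_REF counts
     how many of an NBL's buffers are still outstanding so that the
     NET_BUFFER_LIST is only completed back to NDIS once the last of its
     buffers has been sent (see XenNet_TxBufferGC). */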
  for (curr_nbl = nb_lists; curr_nbl; curr_nbl = next_nbl) {
    PNET_BUFFER curr_nb;
    NBL_REF(curr_nbl) = 0;
    next_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl);
    NET_BUFFER_LIST_NEXT_NBL(curr_nbl) = NULL;
    for (curr_nb = NET_BUFFER_LIST_FIRST_NB(curr_nbl); curr_nb; curr_nb = NET_BUFFER_NEXT_NB(curr_nb)) {
      NB_NBL(curr_nb) = curr_nbl;
      nb_entry = &NB_LIST_ENTRY(curr_nb);
      InsertTailList(&xi->tx_waiting_pkt_list, nb_entry);
      NBL_REF(curr_nbl)++;
    }
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, old_irql);
}
#endif

VOID
XenNet_CancelSend(NDIS_HANDLE adapter_context, PVOID cancel_id)
{
  UNREFERENCED_PARAMETER(adapter_context);
  UNREFERENCED_PARAMETER(cancel_id);
  FUNCTION_ENTER();

  FUNCTION_EXIT();
}
BOOLEAN
XenNet_TxInit(xennet_info_t *xi) {
  USHORT i;
  UNREFERENCED_PARAMETER(xi);

  KeInitializeSpinLock(&xi->tx_lock);
  InitializeListHead(&xi->tx_waiting_pkt_list);

  KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
  xi->tx_outstanding = 0;
  xi->tx_ring_free = NET_TX_RING_SIZE;

  NdisInitializeNPagedLookasideList(&xi->tx_lookaside_list, NULL, NULL, 0,
    PAGE_SIZE, XENNET_POOL_TAG, 0);

  xi->tx_id_free = 0;
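  /* Start with every request id on the freelist and every shadow entry
     empty: no grant ref and no coalesce buffer attached. */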
  for (i = 0; i < NET_TX_RING_SIZE; i++) {
    xi->tx_shadows[i].gref = INVALID_GRANT_REF;
    xi->tx_shadows[i].cb = NULL;
    put_id_on_freelist(xi, i);
  }

  return TRUE;
}

/*
The ring is completely closed down now. We just need to empty anything left
on our freelists and harvest anything left on the rings.
*/
BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi) {
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packet;
#else
  PNET_BUFFER packet;
  PNET_BUFFER_LIST nbl;
#endif
  PLIST_ENTRY entry;
  LARGE_INTEGER timeout;
  KIRQL old_irql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  while (xi->tx_outstanding) {
    KeReleaseSpinLock(&xi->tx_lock, old_irql);
    FUNCTION_MSG("Waiting for %d remaining packets to be sent\n", xi->tx_outstanding);
    timeout.QuadPart = -1 * 1 * 1000 * 1000 * 10; /* 1 second */
    KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, &timeout);
    KeAcquireSpinLock(&xi->tx_lock, &old_irql);
  }
  KeReleaseSpinLock(&xi->tx_lock, old_irql);

  /* Free packets in tx queue */
  while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
#if NTDDI_VERSION < NTDDI_VISTA
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
#else
    packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
    nbl = NB_NBL(packet);
    NBL_REF(nbl)--;
    if (!NBL_REF(nbl)) {
      nbl->Status = NDIS_STATUS_FAILURE;
      NdisMSendNetBufferListsComplete(xi->adapter_handle, nbl, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
    }
#endif
  }
  NdisDeleteNPagedLookasideList(&xi->tx_lookaside_list);

  FUNCTION_EXIT();

  return TRUE;
}