win-pvdrivers

xennet/xennet_tx.c @ 1084:c94174bbf195

changeset:  Add another assert to catch a bsd problem
author:     James Harper <james.harper@bendigoit.com.au>
date:       Thu Dec 12 19:54:33 2013 +1100
parents:    a60d401aa020
children:   27bd2a5a4704
/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "xennet.h"
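
/*
 * Transmit bookkeeping: xi->tx_id_list is a LIFO stack of free ring ids, with
 * xi->tx_id_free as its depth; xi->tx_shadows[] records the grant ref, coalesce
 * buffer and packet associated with each id currently on the ring, so they can
 * be released when the backend's response comes back.
 */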
static USHORT
get_id_from_freelist(struct xennet_info *xi)
{
  XN_ASSERT(xi->tx_id_free);
  xi->tx_id_free--;

  return xi->tx_id_list[xi->tx_id_free];
}

static VOID
put_id_on_freelist(struct xennet_info *xi, USHORT id)
{
  XN_ASSERT(id >= 0 && id < NET_TX_RING_SIZE);
  xi->tx_id_list[xi->tx_id_free] = id;
  xi->tx_id_free++;
}

#define SWAP_USHORT(x) (USHORT)((((x & 0xFF) << 8)|((x >> 8) & 0xFF)))
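
/*
 * Place a coalesce buffer on the tx ring as a single request: this consumes one
 * ring slot and one free id, grants the backend access to the buffer's page, and
 * records the grant and the buffer in the shadow entry so XenNet_TxBufferGC can
 * release them when the response arrives.
 */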
static __forceinline struct netif_tx_request *
XenNet_PutCbOnRing(struct xennet_info *xi, PVOID coalesce_buf, ULONG length, grant_ref_t gref)
{
  struct netif_tx_request *tx;
  tx = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
  xi->tx_ring.req_prod_pvt++;
  XN_ASSERT(xi->tx_ring_free);
  xi->tx_ring_free--;
  tx->id = get_id_from_freelist(xi);
  XN_ASSERT(xi->tx_shadows[tx->id].gref == INVALID_GRANT_REF);
  XN_ASSERT(!xi->tx_shadows[tx->id].cb);
  xi->tx_shadows[tx->id].cb = coalesce_buf;
  tx->gref = XnGrantAccess(xi->handle, (ULONG)(MmGetPhysicalAddress(coalesce_buf).QuadPart >> PAGE_SHIFT), FALSE, gref, (ULONG)'XNTX');
  xi->tx_shadows[tx->id].gref = tx->gref;
  tx->offset = 0;
  tx->size = (USHORT)length;
  XN_ASSERT(tx->offset + tx->size <= PAGE_SIZE);
  XN_ASSERT(tx->size);
  return tx;
}
#if 0
static VOID dump_packet_data(PNDIS_PACKET packet, PCHAR header) {
  UINT mdl_count;
  PMDL first_mdl;
  UINT total_length;

  NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &first_mdl, (PUINT)&total_length);
  FUNCTION_MSG("%s mdl_count = %d, first_mdl = %p, total_length = %d\n", header, mdl_count, first_mdl, total_length);
}
#endif

/* Called at DISPATCH_LEVEL with tx_lock held */
/*
 * Send one NDIS_PACKET. This may involve multiple entries on TX ring.
 */
#if NTDDI_VERSION < NTDDI_VISTA
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet) {
#else
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNET_BUFFER packet) {
#endif
  struct netif_tx_request *tx0 = NULL;
  struct netif_tx_request *txN = NULL;
  struct netif_extra_info *ei = NULL;
  ULONG mss = 0;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  UINT mdl_count;
#else
  NDIS_TCP_LARGE_SEND_OFFLOAD_NET_BUFFER_LIST_INFO lso_info;
  NDIS_TCP_IP_CHECKSUM_NET_BUFFER_LIST_INFO csum_info;
#endif
  uint16_t flags = NETTXF_more_data;
  packet_info_t pi;
  BOOLEAN ndis_lso = FALSE;
  BOOLEAN xen_gso = FALSE;
  ULONG remaining;
  ULONG frags = 0;
  BOOLEAN coalesce_required = FALSE;
  PVOID coalesce_buf;
  ULONG coalesce_remaining = 0;
  grant_ref_t gref;
  ULONG tx_length = 0;

  gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
  if (gref == INVALID_GRANT_REF)
  {
    FUNCTION_MSG("out of grefs\n");
    return FALSE;
  }
  coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
  if (!coalesce_buf)
  {
    XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
    FUNCTION_MSG("out of memory\n");
    return FALSE;
  }
  XenNet_ClearPacketInfo(&pi);
#if NTDDI_VERSION < NTDDI_VISTA
  NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &pi.first_mdl, (PUINT)&pi.total_length);
  pi.curr_mdl = pi.first_mdl;
#else
  /* create a new MDL over the data portion of the first MDL in the packet... it's just easier this way */
  IoBuildPartialMdl(packet->CurrentMdl,
    &pi.first_mdl_storage,
    (PUCHAR)MmGetMdlVirtualAddress(packet->CurrentMdl) + packet->CurrentMdlOffset,
    MmGetMdlByteCount(packet->CurrentMdl) - packet->CurrentMdlOffset);
  pi.total_length = packet->DataLength;
  pi.first_mdl_storage.Next = packet->CurrentMdl->Next;
  pi.first_mdl = pi.curr_mdl = &pi.first_mdl_storage;
#endif
  pi.first_mdl_offset = pi.curr_mdl_offset = 0;
  remaining = min(pi.total_length, PAGE_SIZE);
  while (remaining) { /* this much gets put in the header */
    ULONG length = XenNet_QueryData(&pi, remaining);
    remaining -= length;
    XenNet_EatData(&pi, length);
  }
  frags++;
  if (pi.total_length > PAGE_SIZE) { /* these are the frags we care about */
    remaining = pi.total_length - PAGE_SIZE;
    while (remaining) {
      ULONG length = XenNet_QueryData(&pi, PAGE_SIZE);
      if (length != 0) {
        frags++;
        if (frags > LINUX_MAX_SG_ELEMENTS)
          break; /* worst case there could be hundreds of fragments - leave the loop now */
      }
      remaining -= length;
      XenNet_EatData(&pi, length);
    }
  }
  if (frags > LINUX_MAX_SG_ELEMENTS) {
    frags = LINUX_MAX_SG_ELEMENTS;
    coalesce_required = TRUE;
  }
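  /*
   * When coalesce_required is set the packet has more scatter-gather elements
   * than the backend accepts, so the tail of the packet is copied into
   * page-sized coalesce buffers in the loop below to keep the request count
   * within LINUX_MAX_SG_ELEMENTS.
   */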
  /* if we have enough space on the ring then we have enough id's so no need to check for that */
  if (xi->tx_ring_free < frags + 1) {
    XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
    NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
    //FUNCTION_MSG("Full on send - ring full\n");
    return FALSE;
  }
  XenNet_ParsePacketHeader(&pi, coalesce_buf, PAGE_SIZE);
  remaining = pi.total_length - pi.header_length;
  if (pi.ip_version == 4 && pi.ip_proto == 6 && pi.ip4_length == 0) {
    /* IPv4 TCP packet with a zero IP total-length field - fill it in from the real packet length */
    *((PUSHORT)(pi.header + 0x10)) = GET_NET_USHORT((USHORT)pi.total_length - XN_HDR_SIZE);
  }

#if NTDDI_VERSION < NTDDI_VISTA
  if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP) {
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    if (csum_info->Transmit.NdisPacketChecksumV4) {
      if (csum_info->Transmit.NdisPacketTcpChecksum) {
        flags |= NETTXF_csum_blank | NETTXF_data_validated;
      } else if (csum_info->Transmit.NdisPacketUdpChecksum) {
        flags |= NETTXF_csum_blank | NETTXF_data_validated;
      }
    }
  }
#else
  csum_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpIpChecksumNetBufferListInfo);
  if (csum_info.Transmit.IsIPv4) {
    if (csum_info.Transmit.TcpChecksum) {
      flags |= NETTXF_csum_blank | NETTXF_data_validated;
    } else if (csum_info.Transmit.UdpChecksum) {
      flags |= NETTXF_csum_blank | NETTXF_data_validated;
    }
  } else if (csum_info.Transmit.IsIPv6) {
    FUNCTION_MSG("Transmit.IsIPv6 not supported\n");
  }
#endif

#if NTDDI_VERSION < NTDDI_VISTA
  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));
#else
  lso_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpLargeSendNetBufferListInfo);
  switch (lso_info.Transmit.Type) {
    case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
      mss = lso_info.LsoV1Transmit.MSS;
      /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
      break;
    case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
      mss = lso_info.LsoV2Transmit.MSS;
      /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
      break;
  }
#endif
  if (mss && pi.parse_result == PARSE_OK) {
    ndis_lso = TRUE;
  }

  if (ndis_lso) {
    ULONG csum;
    flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used */
    if (pi.tcp_length >= mss) {
      flags |= NETTXF_extra_info;
      xen_gso = TRUE;
    }
    /* Adjust pseudoheader checksum to be what Linux expects (remove the tcp_length) */
    csum = ~RtlUshortByteSwap(*(PUSHORT)&pi.header[XN_HDR_SIZE + pi.ip4_header_length + 16]);
    csum -= (pi.ip4_length - pi.ip4_header_length);
    while (csum & 0xFFFF0000)
      csum = (csum & 0xFFFF) + (csum >> 16);
    *(PUSHORT)&pi.header[XN_HDR_SIZE + pi.ip4_header_length + 16] = ~RtlUshortByteSwap((USHORT)csum);
  }
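  /*
   * Worked example of the adjustment above (hypothetical values): if the stack
   * stored a pseudoheader checksum that included a TCP length of 1460, the code
   * recovers the one's-complement sum, subtracts
   * (pi.ip4_length - pi.ip4_header_length) == 1460, folds the result back into
   * 16 bits and writes the complement back, leaving a pseudoheader checksum
   * without the TCP length - the form Linux expects when GSO is in use.
   */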
  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */
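  /*
   * Sketch of the resulting ring layout for a GSO packet:
   *   slot 0    (A) tx0 - packet header from coalesce_buf, carries the csum/LSO flags
   *   slot 1    (B) ei  - XEN_NETIF_EXTRA_TYPE_GSO extra info (only when xen_gso)
   *   slot 2..n (C) txN - remaining data, NETTXF_more_data set on all but the last
   */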
  /* (A) */
  tx0 = XenNet_PutCbOnRing(xi, coalesce_buf, pi.header_length, gref);
  XN_ASSERT(tx0); /* this will never happen */
  tx0->flags = flags;
  tx_length += pi.header_length;

  /* lso implies IpHeaderChecksum */
#if NTDDI_VERSION < NTDDI_VISTA
  if (ndis_lso) {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
#else
  if (ndis_lso || csum_info.Transmit.IpHeaderChecksum) {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
#endif
  txN = tx0;

  /* (B) */
  if (xen_gso) {
    XN_ASSERT(flags & NETTXF_extra_info);
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
    //KdPrint((__DRIVER_NAME " pos = %d\n", xi->tx_ring.req_prod_pvt));
    xi->tx_ring.req_prod_pvt++;
    XN_ASSERT(xi->tx_ring_free);
    xi->tx_ring_free--;
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT)mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;
  }

  XN_ASSERT(xi->current_sg_supported || !remaining);

  /* (C) - only if data is remaining */
  coalesce_buf = NULL;
  while (remaining > 0) {
    ULONG length;
    PFN_NUMBER pfn;

    XN_ASSERT(pi.curr_mdl);
    if (coalesce_required) {
      PVOID va;
      if (!coalesce_buf) {
        gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
        if (gref == INVALID_GRANT_REF) {
          FUNCTION_MSG("out of grefs - partial send\n");
          break;
        }
        coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
        if (!coalesce_buf) {
          XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
          FUNCTION_MSG("out of memory - partial send\n");
          break;
        }
        coalesce_remaining = min(PAGE_SIZE, remaining);
      }
      length = XenNet_QueryData(&pi, coalesce_remaining);
      va = NdisBufferVirtualAddressSafe(pi.curr_mdl, LowPagePriority);
      if (!va) {
        FUNCTION_MSG("failed to map buffer va - partial send\n");
        coalesce_remaining = 0;
        remaining -= min(PAGE_SIZE, remaining);
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
      } else {
        memcpy((PUCHAR)coalesce_buf + min(PAGE_SIZE, remaining) - coalesce_remaining, (PUCHAR)va + pi.curr_mdl_offset, length);
        coalesce_remaining -= length;
      }
    } else {
      length = XenNet_QueryData(&pi, PAGE_SIZE);
    }
    if (!length || coalesce_remaining) { /* sometimes there are zero length buffers... */
      XenNet_EatData(&pi, length); /* do this so we actually move to the next buffer */
      continue;
    }

    if (coalesce_buf) {
      if (remaining) {
        txN = XenNet_PutCbOnRing(xi, coalesce_buf, min(PAGE_SIZE, remaining), gref);
        XN_ASSERT(txN);
        coalesce_buf = NULL;
        tx_length += min(PAGE_SIZE, remaining);
        remaining -= min(PAGE_SIZE, remaining);
      }
    } else {
      ULONG offset;

      gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
      if (gref == INVALID_GRANT_REF) {
        FUNCTION_MSG("out of grefs - partial send\n");
        break;
      }
      txN = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
      xi->tx_ring.req_prod_pvt++;
      XN_ASSERT(xi->tx_ring_free);
      xi->tx_ring_free--;
      txN->id = get_id_from_freelist(xi);
      XN_ASSERT(xi->tx_shadows[txN->id].gref == INVALID_GRANT_REF);
      XN_ASSERT(!xi->tx_shadows[txN->id].cb);
      offset = MmGetMdlByteOffset(pi.curr_mdl) + pi.curr_mdl_offset;
      pfn = MmGetMdlPfnArray(pi.curr_mdl)[offset >> PAGE_SHIFT];
      txN->offset = (USHORT)offset & (PAGE_SIZE - 1);
      txN->gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, gref, (ULONG)'XNTX');
      xi->tx_shadows[txN->id].gref = txN->gref;
      //ASSERT(sg->Elements[sg_element].Length > sg_offset);
      txN->size = (USHORT)length;
      XN_ASSERT(txN->offset + txN->size <= PAGE_SIZE);
      XN_ASSERT(txN->size);
      XN_ASSERT(txN->gref != INVALID_GRANT_REF);
      remaining -= length;
      tx_length += length;
    }
    tx0->size = tx0->size + txN->size;
    txN->flags = NETTXF_more_data;
    XenNet_EatData(&pi, length);
  }
  txN->flags &= ~NETTXF_more_data;
  XN_ASSERT(tx0->size == pi.total_length);
  XN_ASSERT(!xi->tx_shadows[txN->id].packet);
  xi->tx_shadows[txN->id].packet = packet;

#if NTDDI_VERSION < NTDDI_VISTA
  if (ndis_lso) {
    NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length);
  }
#else
  switch (lso_info.Transmit.Type) {
    case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
      lso_info.LsoV1TransmitComplete.TcpPayload = tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length;
      break;
    case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
      break;
  }
#endif

  xi->tx_outstanding++;
  return TRUE;
}
/* Called at DISPATCH_LEVEL with tx_lock held */
static VOID
XenNet_SendQueuedPackets(struct xennet_info *xi)
{
  PLIST_ENTRY entry;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packet;
#else
  PNET_BUFFER packet;
#endif
  int notify;

  if (xi->device_state != DEVICE_STATE_ACTIVE)
    return;

  while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
#if NTDDI_VERSION < NTDDI_VISTA
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
#else
    packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
#endif
    if (!XenNet_HWSendPacket(xi, packet)) {
      InsertHeadList(&xi->tx_waiting_pkt_list, entry);
      break;
    }
  }

  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx_ring, notify);
  if (notify) {
    XnNotify(xi->handle, xi->event_channel);
  }
}
// Called at DISPATCH_LEVEL
VOID
XenNet_TxBufferGC(struct xennet_info *xi, BOOLEAN dont_set_event) {
  RING_IDX cons, prod;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET head = NULL, tail = NULL;
  PNDIS_PACKET packet;
#else
  PNET_BUFFER_LIST head = NULL;
  PNET_BUFFER_LIST tail = NULL;
  PNET_BUFFER_LIST nbl;
  PNET_BUFFER packet;
#endif
  ULONG tx_packets = 0;

  XN_ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
    KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
    return;
  }

  do {
    prod = xi->tx_ring.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx_ring.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;
      tx_shadow_t *shadow;

      txrsp = RING_GET_RESPONSE(&xi->tx_ring, cons);

      xi->tx_ring_free++;

      if (txrsp->status == NETIF_RSP_NULL) {
        continue;
      }

      shadow = &xi->tx_shadows[txrsp->id];
      if (shadow->cb) {
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, shadow->cb);
        shadow->cb = NULL;
      }

      if (shadow->gref != INVALID_GRANT_REF) {
        XnEndAccess(xi->handle, shadow->gref, FALSE, (ULONG)'XNTX');
        shadow->gref = INVALID_GRANT_REF;
      }

      if (shadow->packet) {
        PMDL mdl;
        PUCHAR header;
        packet = shadow->packet;
#if NTDDI_VERSION < NTDDI_VISTA
        mdl = NDIS_PACKET_FIRST_NDIS_BUFFER(packet);
#else
        mdl = NET_BUFFER_CURRENT_MDL(packet);
#endif
        #pragma warning(suppress:28193) /* already mapped so guaranteed to work */
        header = MmGetSystemAddressForMdlSafe(mdl, LowPagePriority);
#if NTDDI_VERSION < NTDDI_VISTA
#else
        header += NET_BUFFER_CURRENT_MDL_OFFSET(packet);
#endif

#if NTDDI_VERSION < NTDDI_VISTA
#else
        xi->stats.ifHCOutOctets += packet->DataLength;
        if (packet->DataLength < XN_HDR_SIZE || !(header[0] & 0x01)) {
          /* unicast or tiny packet */
          xi->stats.ifHCOutUcastPkts++;
          xi->stats.ifHCOutUcastOctets += packet->DataLength;
        }
        else if (header[0] == 0xFF && header[1] == 0xFF && header[2] == 0xFF
            && header[3] == 0xFF && header[4] == 0xFF && header[5] == 0xFF) {
          /* broadcast */
          xi->stats.ifHCOutBroadcastPkts++;
          xi->stats.ifHCOutBroadcastOctets += packet->DataLength;
        } else {
          /* multicast */
          xi->stats.ifHCOutMulticastPkts++;
          xi->stats.ifHCOutMulticastOctets += packet->DataLength;
        }
#endif

#if NTDDI_VERSION < NTDDI_VISTA
        PACKET_NEXT_PACKET(packet) = NULL;
        if (!head) {
          head = packet;
        } else {
          PACKET_NEXT_PACKET(tail) = packet;
        }
        tail = packet;
#else
        nbl = NB_NBL(packet);
        NBL_REF(nbl)--;
        if (!NBL_REF(nbl)) {
          NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL;
          if (head) {
            NET_BUFFER_LIST_NEXT_NBL(tail) = nbl;
            tail = nbl;
          } else {
            head = nbl;
            tail = nbl;
          }
        }
#endif
        shadow->packet = NULL;
        tx_packets++;
      }
      XN_ASSERT(xi->tx_shadows[txrsp->id].gref == INVALID_GRANT_REF);
      XN_ASSERT(!xi->tx_shadows[txrsp->id].cb);
      put_id_on_freelist(xi, txrsp->id);
    }

    xi->tx_ring.rsp_cons = prod;
    /* resist the temptation to set the event more than +1... it breaks things */
    if (!dont_set_event)
      xi->tx_ring.sring->rsp_event = prod + 1;
    KeMemoryBarrier();
  } while (prod != xi->tx_ring.sring->rsp_prod);

  /* if queued packets, send them now */
  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  /* must be done without holding any locks */
#if NTDDI_VERSION < NTDDI_VISTA
  while (head) {
    packet = (PNDIS_PACKET)head;
    head = PACKET_NEXT_PACKET(packet);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
  }
#else
  if (head)
    NdisMSendNetBufferListsComplete(xi->adapter_handle, head, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
#endif

  /* must be done after we have truly given back all packets */
  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
  xi->tx_outstanding -= tx_packets;
  if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
  }
  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
}
#if NTDDI_VERSION < NTDDI_VISTA
VOID
XenNet_SendPackets(NDIS_HANDLE MiniportAdapterContext, PPNDIS_PACKET PacketArray, UINT NumberOfPackets) {
  struct xennet_info *xi = MiniportAdapterContext;
  PNDIS_PACKET packet;
  UINT i;
  PLIST_ENTRY entry;
  KIRQL old_irql;

  if (xi->device_state != DEVICE_STATE_ACTIVE) {
    for (i = 0; i < NumberOfPackets; i++) {
      NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
    }
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  for (i = 0; i < NumberOfPackets; i++) {
    packet = PacketArray[i];
    XN_ASSERT(packet);
    entry = &PACKET_LIST_ENTRY(packet);
    InsertTailList(&xi->tx_waiting_pkt_list, entry);
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, old_irql);
}
#else
// called at <= DISPATCH_LEVEL
VOID
XenNet_SendNetBufferLists(
    NDIS_HANDLE adapter_context,
    PNET_BUFFER_LIST nb_lists,
    NDIS_PORT_NUMBER port_number,
    ULONG send_flags) {
  struct xennet_info *xi = adapter_context;
  PLIST_ENTRY nb_entry;
  KIRQL old_irql;
  PNET_BUFFER_LIST curr_nbl;
  PNET_BUFFER_LIST next_nbl;

  UNREFERENCED_PARAMETER(port_number);

  if (xi->device_state == DEVICE_STATE_INACTIVE) {
    curr_nbl = nb_lists;
    for (curr_nbl = nb_lists; curr_nbl; curr_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl)) {
      curr_nbl->Status = NDIS_STATUS_FAILURE;
    }
    /* this actions the whole list */
    NdisMSendNetBufferListsComplete(xi->adapter_handle, nb_lists, (send_flags & NDIS_SEND_FLAGS_DISPATCH_LEVEL)?NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL:0);
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  for (curr_nbl = nb_lists; curr_nbl; curr_nbl = next_nbl) {
    PNET_BUFFER curr_nb;
    NBL_REF(curr_nbl) = 0;
    next_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl);
    NET_BUFFER_LIST_NEXT_NBL(curr_nbl) = NULL;
    for (curr_nb = NET_BUFFER_LIST_FIRST_NB(curr_nbl); curr_nb; curr_nb = NET_BUFFER_NEXT_NB(curr_nb)) {
      NB_NBL(curr_nb) = curr_nbl;
      nb_entry = &NB_LIST_ENTRY(curr_nb);
      InsertTailList(&xi->tx_waiting_pkt_list, nb_entry);
      NBL_REF(curr_nbl)++;
    }
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, old_irql);
}
#endif
VOID
XenNet_CancelSend(NDIS_HANDLE adapter_context, PVOID cancel_id)
{
  UNREFERENCED_PARAMETER(adapter_context);
  UNREFERENCED_PARAMETER(cancel_id);
  FUNCTION_ENTER();

  FUNCTION_EXIT();
}

BOOLEAN
XenNet_TxInit(xennet_info_t *xi) {
  USHORT i;
  UNREFERENCED_PARAMETER(xi);

  KeInitializeSpinLock(&xi->tx_lock);
  InitializeListHead(&xi->tx_waiting_pkt_list);

  KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
  xi->tx_outstanding = 0;
  xi->tx_ring_free = NET_TX_RING_SIZE;

  NdisInitializeNPagedLookasideList(&xi->tx_lookaside_list, NULL, NULL, 0,
    PAGE_SIZE, XENNET_POOL_TAG, 0);

  xi->tx_id_free = 0;
  for (i = 0; i < NET_TX_RING_SIZE; i++) {
    xi->tx_shadows[i].gref = INVALID_GRANT_REF;
    xi->tx_shadows[i].cb = NULL;
    put_id_on_freelist(xi, i);
  }

  return TRUE;
}
/*
The ring is completely closed down now. We just need to empty anything left
on our freelists and harvest anything left on the rings.
*/

BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi) {
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packet;
#else
  PNET_BUFFER packet;
  PNET_BUFFER_LIST nbl;
#endif
  PLIST_ENTRY entry;
  LARGE_INTEGER timeout;
  KIRQL old_irql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  while (xi->tx_outstanding) {
    KeReleaseSpinLock(&xi->tx_lock, old_irql);
    FUNCTION_MSG("Waiting for %d remaining packets to be sent\n", xi->tx_outstanding);
    timeout.QuadPart = -1 * 1 * 1000 * 1000 * 10; /* 1 second */
    KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, &timeout);
    KeAcquireSpinLock(&xi->tx_lock, &old_irql);
  }
  KeReleaseSpinLock(&xi->tx_lock, old_irql);

  /* Free packets in tx queue */
  while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
#if NTDDI_VERSION < NTDDI_VISTA
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
#else
    packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
    nbl = NB_NBL(packet);
    NBL_REF(nbl)--;
    if (!NBL_REF(nbl)) {
      nbl->Status = NDIS_STATUS_FAILURE;
      NdisMSendNetBufferListsComplete(xi->adapter_handle, nbl, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
    }
#endif
  }
  NdisDeleteNPagedLookasideList(&xi->tx_lookaside_list);

  FUNCTION_EXIT();

  return TRUE;
}