win-pvdrivers

view xennet/xennet_tx.c @ 1099:27bd2a5a4704

License change from GPL to BSD
author James Harper <james.harper@bendigoit.com.au>
date Thu Mar 13 13:38:31 2014 +1100 (2014-03-13)
parents c94174bbf195
/*
PV Drivers for Windows Xen HVM Domains

Copyright (c) 2014, James Harper
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of James Harper nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL JAMES HARPER BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "xennet.h"
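
/* TX slot ids are managed as a simple LIFO stack (tx_id_list/tx_id_free).
 * Each id indexes tx_shadows[], which remembers the grant ref, coalesce
 * buffer and originating packet for the request so that XenNet_TxBufferGC
 * can release them when the backend's response arrives. */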
static USHORT
get_id_from_freelist(struct xennet_info *xi)
{
  XN_ASSERT(xi->tx_id_free);
  xi->tx_id_free--;

  return xi->tx_id_list[xi->tx_id_free];
}

static VOID
put_id_on_freelist(struct xennet_info *xi, USHORT id)
{
  XN_ASSERT(id < NET_TX_RING_SIZE); /* id is unsigned, so only the upper bound needs checking */
  xi->tx_id_list[xi->tx_id_free] = id;
  xi->tx_id_free++;
}

#define SWAP_USHORT(x) (USHORT)((((x) & 0xFF) << 8) | (((x) >> 8) & 0xFF)) /* swap high and low bytes of a USHORT */
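
/* Claim the next slot on the TX ring and point it at a coalesce buffer:
 * take an id from the freelist, record the buffer in the shadow entry, and
 * grant the buffer's page to the backend. Callers must have checked
 * tx_ring_free beforehand, and remain responsible for setting flags on the
 * returned request. */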
static __forceinline struct netif_tx_request *
XenNet_PutCbOnRing(struct xennet_info *xi, PVOID coalesce_buf, ULONG length, grant_ref_t gref)
{
  struct netif_tx_request *tx;
  tx = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
  xi->tx_ring.req_prod_pvt++;
  XN_ASSERT(xi->tx_ring_free);
  xi->tx_ring_free--;
  tx->id = get_id_from_freelist(xi);
  XN_ASSERT(xi->tx_shadows[tx->id].gref == INVALID_GRANT_REF);
  XN_ASSERT(!xi->tx_shadows[tx->id].cb);
  xi->tx_shadows[tx->id].cb = coalesce_buf;
  tx->gref = XnGrantAccess(xi->handle, (ULONG)(MmGetPhysicalAddress(coalesce_buf).QuadPart >> PAGE_SHIFT), FALSE, gref, (ULONG)'XNTX');
  xi->tx_shadows[tx->id].gref = tx->gref;
  tx->offset = 0;
  tx->size = (USHORT)length;
  XN_ASSERT(tx->offset + tx->size <= PAGE_SIZE);
  XN_ASSERT(tx->size);
  return tx;
}

#if 0
static VOID dump_packet_data(PNDIS_PACKET packet, PCHAR header) {
  UINT mdl_count;
  PMDL first_mdl;
  UINT total_length;

  NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &first_mdl, (PUINT)&total_length);
  FUNCTION_MSG("%s mdl_count = %d, first_mdl = %p, total_length = %d\n", header, mdl_count, first_mdl, total_length);
}
#endif

/* Called at DISPATCH_LEVEL with tx_lock held */
/*
 * Send one NDIS_PACKET. This may involve multiple entries on TX ring.
 */
#if NTDDI_VERSION < NTDDI_VISTA
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet) {
#else
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNET_BUFFER packet) {
#endif
  struct netif_tx_request *tx0 = NULL;
  struct netif_tx_request *txN = NULL;
  struct netif_extra_info *ei = NULL;
  ULONG mss = 0;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  UINT mdl_count;
#else
  NDIS_TCP_LARGE_SEND_OFFLOAD_NET_BUFFER_LIST_INFO lso_info;
  NDIS_TCP_IP_CHECKSUM_NET_BUFFER_LIST_INFO csum_info;
#endif
  uint16_t flags = NETTXF_more_data;
  packet_info_t pi;
  BOOLEAN ndis_lso = FALSE;
  BOOLEAN xen_gso = FALSE;
  ULONG remaining;
  ULONG frags = 0;
  BOOLEAN coalesce_required = FALSE;
  PVOID coalesce_buf;
  ULONG coalesce_remaining = 0;
  grant_ref_t gref;
  ULONG tx_length = 0;

  gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
  if (gref == INVALID_GRANT_REF)
  {
    FUNCTION_MSG("out of grefs\n");
    return FALSE;
  }
  coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
  if (!coalesce_buf)
  {
    XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
    FUNCTION_MSG("out of memory\n");
    return FALSE;
  }
  XenNet_ClearPacketInfo(&pi);
#if NTDDI_VERSION < NTDDI_VISTA
  NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &pi.first_mdl, (PUINT)&pi.total_length);
  pi.curr_mdl = pi.first_mdl;
#else
  /* create a new MDL over the data portion of the first MDL in the packet... it's just easier this way */
  IoBuildPartialMdl(packet->CurrentMdl,
    &pi.first_mdl_storage,
    (PUCHAR)MmGetMdlVirtualAddress(packet->CurrentMdl) + packet->CurrentMdlOffset,
    MmGetMdlByteCount(packet->CurrentMdl) - packet->CurrentMdlOffset);
  pi.total_length = packet->DataLength;
  pi.first_mdl_storage.Next = packet->CurrentMdl->Next;
  pi.first_mdl = pi.curr_mdl = &pi.first_mdl_storage;
#endif
  pi.first_mdl_offset = pi.curr_mdl_offset = 0;
  remaining = min(pi.total_length, PAGE_SIZE);
  while (remaining) { /* this much gets put in the header */
    ULONG length = XenNet_QueryData(&pi, remaining);
    remaining -= length;
    XenNet_EatData(&pi, length);
  }
  frags++;
  if (pi.total_length > PAGE_SIZE) { /* these are the frags we care about */
    remaining = pi.total_length - PAGE_SIZE;
    while (remaining) {
      ULONG length = XenNet_QueryData(&pi, PAGE_SIZE);
      if (length != 0) {
        frags++;
        if (frags > LINUX_MAX_SG_ELEMENTS)
          break; /* worst case there could be hundreds of fragments - leave the loop now */
      }
      remaining -= length;
      XenNet_EatData(&pi, length);
    }
  }
  if (frags > LINUX_MAX_SG_ELEMENTS) {
    frags = LINUX_MAX_SG_ELEMENTS;
    coalesce_required = TRUE;
  }
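
  /* LINUX_MAX_SG_ELEMENTS is the most slots the backend will accept for a
   * single packet, so a more fragmented packet is instead copied
   * ("coalesced") into page-sized lookaside buffers in the (C) loop below. */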

  /* if we have enough space on the ring then we have enough id's so no need to check for that */
  if (xi->tx_ring_free < frags + 1) {
    XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
    NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
    //FUNCTION_MSG("Full on send - ring full\n");
    return FALSE;
  }
  XenNet_ParsePacketHeader(&pi, coalesce_buf, PAGE_SIZE);
  remaining = pi.total_length - pi.header_length;
  if (pi.ip_version == 4 && pi.ip_proto == 6 && pi.ip4_length == 0) {
    /* an LSO send can arrive with the IP total-length field still zero - patch in the real length */
    *((PUSHORT)(pi.header + 0x10)) = GET_NET_USHORT((USHORT)pi.total_length - XN_HDR_SIZE);
  }

#if NTDDI_VERSION < NTDDI_VISTA
  if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP) {
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    if (csum_info->Transmit.NdisPacketChecksumV4) {
      if (csum_info->Transmit.NdisPacketTcpChecksum) {
        flags |= NETTXF_csum_blank | NETTXF_data_validated;
      } else if (csum_info->Transmit.NdisPacketUdpChecksum) {
        flags |= NETTXF_csum_blank | NETTXF_data_validated;
      }
    }
  }
#else
  csum_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpIpChecksumNetBufferListInfo);
  if (csum_info.Transmit.IsIPv4) {
    if (csum_info.Transmit.TcpChecksum) {
      flags |= NETTXF_csum_blank | NETTXF_data_validated;
    } else if (csum_info.Transmit.UdpChecksum) {
      flags |= NETTXF_csum_blank | NETTXF_data_validated;
    }
  } else if (csum_info.Transmit.IsIPv6) {
    FUNCTION_MSG("Transmit.IsIPv6 not supported\n");
  }
#endif

#if NTDDI_VERSION < NTDDI_VISTA
  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));
#else
  lso_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpLargeSendNetBufferListInfo);
  switch (lso_info.Transmit.Type) {
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
    mss = lso_info.LsoV1Transmit.MSS;
    /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
    break;
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
    mss = lso_info.LsoV2Transmit.MSS;
    /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
    break;
  }
#endif
  if (mss && pi.parse_result == PARSE_OK) {
    ndis_lso = TRUE;
  }

  if (ndis_lso) {
    ULONG csum;
    flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used */
    if (pi.tcp_length >= mss) {
      flags |= NETTXF_extra_info;
      xen_gso = TRUE;
    }
    /* Adjust pseudoheader checksum to be what Linux expects (remove the tcp_length) */
    csum = ~RtlUshortByteSwap(*(PUSHORT)&pi.header[XN_HDR_SIZE + pi.ip4_header_length + 16]);
    csum -= (pi.ip4_length - pi.ip4_header_length);
    while (csum & 0xFFFF0000)
      csum = (csum & 0xFFFF) + (csum >> 16);
    *(PUSHORT)&pi.header[XN_HDR_SIZE + pi.ip4_header_length + 16] = ~RtlUshortByteSwap((USHORT)csum);
  }
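
  /* To illustrate the arithmetic above with made-up numbers: for an IP total
   * length of 0x0600 and a 20-byte IP header, Windows seeds the TCP checksum
   * field with a pseudoheader sum that includes the TCP length
   * 0x0600 - 20 = 0x05EC, but the backend expects the sum without it.
   * Complementing recovers the raw sum, the subtraction removes the TCP
   * length, the while loop folds the borrow back into the low 16 bits
   * (one's-complement), and the result is complemented and stored back in
   * network byte order. */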

  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */

  /* (A) */
  tx0 = XenNet_PutCbOnRing(xi, coalesce_buf, pi.header_length, gref);
  XN_ASSERT(tx0); /* this will never happen */
  tx0->flags = flags;
  tx_length += pi.header_length;

  /* lso implies IpHeaderChecksum */
#if NTDDI_VERSION < NTDDI_VISTA
  if (ndis_lso) {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
#else
  if (ndis_lso || csum_info.Transmit.IpHeaderChecksum) {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
#endif
  txN = tx0;

  /* (B) */
  if (xen_gso) {
    XN_ASSERT(flags & NETTXF_extra_info);
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
    //KdPrint((__DRIVER_NAME "     pos = %d\n", xi->tx_ring.req_prod_pvt));
    xi->tx_ring.req_prod_pvt++;
    XN_ASSERT(xi->tx_ring_free);
    xi->tx_ring_free--;
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT)mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;
  }

  XN_ASSERT(xi->current_sg_supported || !remaining);

  /* (C) - only if data is remaining */
  coalesce_buf = NULL;
  while (remaining > 0) {
    ULONG length;
    PFN_NUMBER pfn;

    XN_ASSERT(pi.curr_mdl);
    if (coalesce_required) {
      PVOID va;
      if (!coalesce_buf) {
        gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
        if (gref == INVALID_GRANT_REF) {
          FUNCTION_MSG("out of grefs - partial send\n");
          break;
        }
        coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
        if (!coalesce_buf) {
          XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
          FUNCTION_MSG("out of memory - partial send\n");
          break;
        }
        coalesce_remaining = min(PAGE_SIZE, remaining);
      }
      length = XenNet_QueryData(&pi, coalesce_remaining);
      va = NdisBufferVirtualAddressSafe(pi.curr_mdl, LowPagePriority);
      if (!va) {
        FUNCTION_MSG("failed to map buffer va - partial send\n");
        /* the partially filled coalesce buffer must never reach the ring, so
           free it and its grant and stop queueing, as the other partial-send
           failure paths above do */
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
        XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
        coalesce_buf = NULL;
        break;
      } else {
        memcpy((PUCHAR)coalesce_buf + min(PAGE_SIZE, remaining) - coalesce_remaining, (PUCHAR)va + pi.curr_mdl_offset, length);
        coalesce_remaining -= length;
      }
    } else {
      length = XenNet_QueryData(&pi, PAGE_SIZE);
    }
    if (!length || coalesce_remaining) { /* sometimes there are zero length buffers... */
      XenNet_EatData(&pi, length); /* do this so we actually move to the next buffer */
      continue;
    }

    if (coalesce_buf) {
      if (remaining) {
        txN = XenNet_PutCbOnRing(xi, coalesce_buf, min(PAGE_SIZE, remaining), gref);
        XN_ASSERT(txN);
        coalesce_buf = NULL;
        tx_length += min(PAGE_SIZE, remaining);
        remaining -= min(PAGE_SIZE, remaining);
      }
    } else {
      ULONG offset;

      gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
      if (gref == INVALID_GRANT_REF) {
        FUNCTION_MSG("out of grefs - partial send\n");
        break;
      }
      txN = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
      xi->tx_ring.req_prod_pvt++;
      XN_ASSERT(xi->tx_ring_free);
      xi->tx_ring_free--;
      txN->id = get_id_from_freelist(xi);
      XN_ASSERT(xi->tx_shadows[txN->id].gref == INVALID_GRANT_REF);
      XN_ASSERT(!xi->tx_shadows[txN->id].cb);
      offset = MmGetMdlByteOffset(pi.curr_mdl) + pi.curr_mdl_offset;
      pfn = MmGetMdlPfnArray(pi.curr_mdl)[offset >> PAGE_SHIFT];
      txN->offset = (USHORT)offset & (PAGE_SIZE - 1);
      txN->gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, gref, (ULONG)'XNTX');
      xi->tx_shadows[txN->id].gref = txN->gref;
      //ASSERT(sg->Elements[sg_element].Length > sg_offset);
      txN->size = (USHORT)length;
      XN_ASSERT(txN->offset + txN->size <= PAGE_SIZE);
      XN_ASSERT(txN->size);
      XN_ASSERT(txN->gref != INVALID_GRANT_REF);
      remaining -= length;
      tx_length += length;
    }
    tx0->size = tx0->size + txN->size;
    txN->flags = NETTXF_more_data;
    XenNet_EatData(&pi, length);
  }
  txN->flags &= ~NETTXF_more_data;
  XN_ASSERT(tx0->size == pi.total_length);
  XN_ASSERT(!xi->tx_shadows[txN->id].packet);
  xi->tx_shadows[txN->id].packet = packet;

#if NTDDI_VERSION < NTDDI_VISTA
  if (ndis_lso) {
    NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length);
  }
#else
  switch (lso_info.Transmit.Type) {
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
    lso_info.LsoV1TransmitComplete.TcpPayload = tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length;
    break;
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
    break;
  }
#endif

  xi->tx_outstanding++;
  return TRUE;
}

/* Called at DISPATCH_LEVEL with tx_lock held */
static VOID
XenNet_SendQueuedPackets(struct xennet_info *xi)
{
  PLIST_ENTRY entry;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packet;
#else
  PNET_BUFFER packet;
#endif
  int notify;

  if (xi->device_state != DEVICE_STATE_ACTIVE)
    return;

  while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
#if NTDDI_VERSION < NTDDI_VISTA
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
#else
    packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
#endif
    if (!XenNet_HWSendPacket(xi, packet)) {
      InsertHeadList(&xi->tx_waiting_pkt_list, entry);
      break;
    }
  }

  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx_ring, notify);
  if (notify) {
    XnNotify(xi->handle, xi->event_channel);
  }
}
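
/* RING_PUSH_REQUESTS_AND_CHECK_NOTIFY publishes req_prod and sets notify only
 * if the backend has consumed past the point where it asked to be woken, so a
 * busy backend is not interrupted for every packet. XenNet_TxBufferGC below
 * plays the same game in the other direction by setting rsp_event to just one
 * past the responses it has seen. */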

// Called at DISPATCH_LEVEL
VOID
XenNet_TxBufferGC(struct xennet_info *xi, BOOLEAN dont_set_event) {
  RING_IDX cons, prod;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET head = NULL, tail = NULL;
  PNDIS_PACKET packet;
#else
  PNET_BUFFER_LIST head = NULL;
  PNET_BUFFER_LIST tail = NULL;
  PNET_BUFFER_LIST nbl;
  PNET_BUFFER packet;
#endif
  ULONG tx_packets = 0;

  XN_ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
    KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
    return;
  }

  do {
    prod = xi->tx_ring.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx_ring.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;
      tx_shadow_t *shadow;

      txrsp = RING_GET_RESPONSE(&xi->tx_ring, cons);

      xi->tx_ring_free++;

      if (txrsp->status == NETIF_RSP_NULL) {
        continue;
      }

      shadow = &xi->tx_shadows[txrsp->id];
      if (shadow->cb) {
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, shadow->cb);
        shadow->cb = NULL;
      }

      if (shadow->gref != INVALID_GRANT_REF) {
        XnEndAccess(xi->handle, shadow->gref, FALSE, (ULONG)'XNTX');
        shadow->gref = INVALID_GRANT_REF;
      }

      if (shadow->packet) {
        PMDL mdl;
        PUCHAR header;
        packet = shadow->packet;
#if NTDDI_VERSION < NTDDI_VISTA
        mdl = NDIS_PACKET_FIRST_NDIS_BUFFER(packet);
#else
        mdl = NET_BUFFER_CURRENT_MDL(packet);
#endif
        #pragma warning(suppress:28193) /* already mapped so guaranteed to work */
        header = MmGetSystemAddressForMdlSafe(mdl, LowPagePriority);
#if NTDDI_VERSION >= NTDDI_VISTA
        header += NET_BUFFER_CURRENT_MDL_OFFSET(packet);

        xi->stats.ifHCOutOctets += packet->DataLength;
        if (packet->DataLength < XN_HDR_SIZE || !(header[0] & 0x01)) {
          /* unicast or tiny packet */
          xi->stats.ifHCOutUcastPkts++;
          xi->stats.ifHCOutUcastOctets += packet->DataLength;
        } else if (header[0] == 0xFF && header[1] == 0xFF && header[2] == 0xFF
            && header[3] == 0xFF && header[4] == 0xFF && header[5] == 0xFF) {
          /* broadcast */
          xi->stats.ifHCOutBroadcastPkts++;
          xi->stats.ifHCOutBroadcastOctets += packet->DataLength;
        } else {
          /* multicast */
          xi->stats.ifHCOutMulticastPkts++;
          xi->stats.ifHCOutMulticastOctets += packet->DataLength;
        }
#endif

#if NTDDI_VERSION < NTDDI_VISTA
        PACKET_NEXT_PACKET(packet) = NULL;
        if (!head) {
          head = packet;
        } else {
          PACKET_NEXT_PACKET(tail) = packet;
        }
        tail = packet;
#else
        nbl = NB_NBL(packet);
        NBL_REF(nbl)--;
        if (!NBL_REF(nbl)) {
          NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL;
          if (head) {
            NET_BUFFER_LIST_NEXT_NBL(tail) = nbl;
            tail = nbl;
          } else {
            head = nbl;
            tail = nbl;
          }
        }
#endif
        shadow->packet = NULL;
        tx_packets++;
      }
      XN_ASSERT(xi->tx_shadows[txrsp->id].gref == INVALID_GRANT_REF);
      XN_ASSERT(!xi->tx_shadows[txrsp->id].cb);
      put_id_on_freelist(xi, txrsp->id);
    }

    xi->tx_ring.rsp_cons = prod;
    /* resist the temptation to set the event more than +1... it breaks things */
    if (!dont_set_event)
      xi->tx_ring.sring->rsp_event = prod + 1;
    KeMemoryBarrier();
  } while (prod != xi->tx_ring.sring->rsp_prod);

  /* if queued packets, send them now */
  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  /* must be done without holding any locks */
#if NTDDI_VERSION < NTDDI_VISTA
  while (head) {
    packet = (PNDIS_PACKET)head;
    head = PACKET_NEXT_PACKET(packet);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
  }
#else
  if (head)
    NdisMSendNetBufferListsComplete(xi->adapter_handle, head, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
#endif

  /* must be done after we have truly given back all packets */
  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
  xi->tx_outstanding -= tx_packets;
  if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
  }
  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
}
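
/* Both miniport send entry points below - SendPackets for NDIS 5 and
 * SendNetBufferLists for NDIS 6 - simply queue onto tx_waiting_pkt_list and
 * drain it via XenNet_SendQueuedPackets under tx_lock. On the NDIS 6 path
 * each NET_BUFFER is queued individually and NBL_REF tracks how many buffers
 * of a NET_BUFFER_LIST are still outstanding, so the NBL is completed exactly
 * once, when its last buffer is done. */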

#if NTDDI_VERSION < NTDDI_VISTA
VOID
XenNet_SendPackets(NDIS_HANDLE MiniportAdapterContext, PPNDIS_PACKET PacketArray, UINT NumberOfPackets) {
  struct xennet_info *xi = MiniportAdapterContext;
  PNDIS_PACKET packet;
  UINT i;
  PLIST_ENTRY entry;
  KIRQL old_irql;

  if (xi->device_state != DEVICE_STATE_ACTIVE) {
    for (i = 0; i < NumberOfPackets; i++) {
      NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
    }
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  for (i = 0; i < NumberOfPackets; i++) {
    packet = PacketArray[i];
    XN_ASSERT(packet);
    entry = &PACKET_LIST_ENTRY(packet);
    InsertTailList(&xi->tx_waiting_pkt_list, entry);
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, old_irql);
}
#else
// called at <= DISPATCH_LEVEL
VOID
XenNet_SendNetBufferLists(
    NDIS_HANDLE adapter_context,
    PNET_BUFFER_LIST nb_lists,
    NDIS_PORT_NUMBER port_number,
    ULONG send_flags) {
  struct xennet_info *xi = adapter_context;
  PLIST_ENTRY nb_entry;
  KIRQL old_irql;
  PNET_BUFFER_LIST curr_nbl;
  PNET_BUFFER_LIST next_nbl;

  UNREFERENCED_PARAMETER(port_number);

  if (xi->device_state == DEVICE_STATE_INACTIVE) {
    for (curr_nbl = nb_lists; curr_nbl; curr_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl)) {
      curr_nbl->Status = NDIS_STATUS_FAILURE;
    }
    /* this actions the whole list */
    NdisMSendNetBufferListsComplete(xi->adapter_handle, nb_lists, (send_flags & NDIS_SEND_FLAGS_DISPATCH_LEVEL) ? NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL : 0);
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  for (curr_nbl = nb_lists; curr_nbl; curr_nbl = next_nbl) {
    PNET_BUFFER curr_nb;
    NBL_REF(curr_nbl) = 0;
    next_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl);
    NET_BUFFER_LIST_NEXT_NBL(curr_nbl) = NULL;
    for (curr_nb = NET_BUFFER_LIST_FIRST_NB(curr_nbl); curr_nb; curr_nb = NET_BUFFER_NEXT_NB(curr_nb)) {
      NB_NBL(curr_nb) = curr_nbl;
      nb_entry = &NB_LIST_ENTRY(curr_nb);
      InsertTailList(&xi->tx_waiting_pkt_list, nb_entry);
      NBL_REF(curr_nbl)++;
    }
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, old_irql);
}
#endif

VOID
XenNet_CancelSend(NDIS_HANDLE adapter_context, PVOID cancel_id)
{
  UNREFERENCED_PARAMETER(adapter_context);
  UNREFERENCED_PARAMETER(cancel_id);
  FUNCTION_ENTER();

  FUNCTION_EXIT();
}

BOOLEAN
XenNet_TxInit(xennet_info_t *xi) {
  USHORT i;

  KeInitializeSpinLock(&xi->tx_lock);
  InitializeListHead(&xi->tx_waiting_pkt_list);

  KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
  xi->tx_outstanding = 0;
  xi->tx_ring_free = NET_TX_RING_SIZE;

  NdisInitializeNPagedLookasideList(&xi->tx_lookaside_list, NULL, NULL, 0,
    PAGE_SIZE, XENNET_POOL_TAG, 0);

  xi->tx_id_free = 0;
  for (i = 0; i < NET_TX_RING_SIZE; i++) {
    xi->tx_shadows[i].gref = INVALID_GRANT_REF;
    xi->tx_shadows[i].cb = NULL;
    put_id_on_freelist(xi, i);
  }

  return TRUE;
}
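
/* XenNet_TxInit leaves the freelist full (tx_id_free == NET_TX_RING_SIZE)
 * and every shadow entry empty; the XN_ASSERTs in the send path depend on
 * the GC path restoring exactly this state for each completed id. */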

/*
The ring is completely closed down now. We just need to empty anything left
on our freelists and harvest anything left on the rings.
*/

BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi) {
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packet;
#else
  PNET_BUFFER packet;
  PNET_BUFFER_LIST nbl;
#endif
  PLIST_ENTRY entry;
  LARGE_INTEGER timeout;
  KIRQL old_irql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  while (xi->tx_outstanding) {
    KeReleaseSpinLock(&xi->tx_lock, old_irql);
    FUNCTION_MSG("Waiting for %d remaining packets to be sent\n", xi->tx_outstanding);
    timeout.QuadPart = -1 * 1 * 1000 * 1000 * 10; /* 1 second */
    KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, &timeout);
    KeAcquireSpinLock(&xi->tx_lock, &old_irql);
  }
  KeReleaseSpinLock(&xi->tx_lock, old_irql);

  /* Free packets in tx queue */
  while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
#if NTDDI_VERSION < NTDDI_VISTA
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
#else
    packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
    nbl = NB_NBL(packet);
    NBL_REF(nbl)--;
    if (!NBL_REF(nbl)) {
      nbl->Status = NDIS_STATUS_FAILURE;
      NdisMSendNetBufferListsComplete(xi->adapter_handle, nbl, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
    }
#endif
  }
  NdisDeleteNPagedLookasideList(&xi->tx_lookaside_list);

  FUNCTION_EXIT();

  return TRUE;
}