win-pvdrivers

xennet/xennet_tx.c @ 266:b88529df8b60

More wdm updates
author    James Harper <james.harper@bendigoit.com.au>
date      Wed May 07 10:47:03 2008 +1000 (2008-05-07)
parents   6c1ab34c1bda
children  2fc877b00cfd da9b1e17fbc0
/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "xennet.h"

#define FREELIST_ID_ERROR 0xFFFF

/* profile counter increment - compiles away unless XEN_PROFILE is defined */
#ifdef XEN_PROFILE
#define PC_INC(var) var++
#else
#define PC_INC(var)
#endif
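
/*
 * Tx slot accounting: tx_id_list[0 .. tx_id_free-1] holds the ids that are
 * not currently on the ring. Requests that carry no id of their own (the
 * extra_info slots used for GSO) are tracked separately via tx_no_id_used,
 * so the number of usable ring slots is the difference between the two
 * counters.
 */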

static ULONG
free_requests(struct xennet_info *xi)
{
  return xi->tx_id_free - xi->tx_no_id_used;
}

static USHORT
get_id_from_freelist(struct xennet_info *xi)
{
  if (xi->tx_id_free - xi->tx_no_id_used == 0)
  {
    KdPrint((__DRIVER_NAME " Out of ids\n"));
    return FREELIST_ID_ERROR;
  }
  xi->tx_id_free--;
  return xi->tx_id_list[xi->tx_id_free];
}

static USHORT
get_no_id_from_freelist(struct xennet_info *xi)
{
  if (xi->tx_id_free - xi->tx_no_id_used == 0)
  {
    KdPrint((__DRIVER_NAME " Out of no_ids\n"));
    return FREELIST_ID_ERROR;
  }
  xi->tx_no_id_used++;
  return 0;
}

static VOID
put_id_on_freelist(struct xennet_info *xi, USHORT id)
{
  xi->tx_id_list[xi->tx_id_free] = id;
  xi->tx_id_free++;
}

static VOID
put_no_id_on_freelist(struct xennet_info *xi)
{
  xi->tx_no_id_used--;
}
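
/* byte-swap helper for 16-bit values; appears to be unused in this file */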
#define SWAP_USHORT(x) (USHORT)((((x & 0xFF) << 8)|((x >> 8) & 0xFF)))

/* Place a buffer on the tx ring; the caller advances req_prod_pvt. */
static struct netif_tx_request*
XenNet_PutOnTxRing(
  struct xennet_info *xi,
  PMDL mdl,
  uint16_t flags)
{
  struct netif_tx_request *tx;
  unsigned short id;

  id = get_id_from_freelist(xi);
  ASSERT(id != FREELIST_ID_ERROR);
  ASSERT(xi->tx_pkts[id] == NULL);
  tx = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
  tx->gref = get_grant_ref(mdl);
  xi->tx_mdls[id] = mdl;
  tx->id = id;
  tx->offset = 0;
  tx->size = (USHORT)MmGetMdlByteCount(mdl);
  tx->flags = flags;
  PC_INC(ProfCount_TxPacketsTotal);

  return tx;
}

/* Called at DISPATCH_LEVEL with tx_lock held */
/*
 * Send one NDIS_PACKET. This may involve multiple entries on the TX ring.
 * Returns FALSE if the ring is too full, in which case the caller requeues
 * the packet and retries later.
 */
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet)
{
  struct netif_tx_request *tx = NULL;
  struct netif_extra_info *ei = NULL;
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  UINT total_packet_length;
  ULONG mss;
  PMDL in_mdl;
  PUCHAR in_buffer = NULL;
  PUCHAR out_buffer;
  USHORT in_remaining;
  USHORT out_remaining;
  uint16_t flags = NETTXF_more_data;
  packet_info_t pi;
  BOOLEAN ndis_lso = FALSE;
  BOOLEAN xen_gso = FALSE;
  int pages_required;
  int page_num;
  USHORT copied;

#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;

  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  RtlZeroMemory(&pi, sizeof(pi));

  csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
    packet, TcpIpChecksumPacketInfo);
  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));

  if (mss)
    ndis_lso = TRUE;

  NdisQueryPacket(packet, NULL, NULL, &in_mdl, &total_packet_length);

  pages_required = (total_packet_length + PAGE_SIZE - 1) / PAGE_SIZE;

  /* an LSO packet needs one extra ring slot for the extra_info request */
  if (pages_required + !!ndis_lso > (int)free_requests(xi))
  {
    KdPrint((__DRIVER_NAME " Full on send - required = %d, available = %d\n", pages_required + !!ndis_lso, (int)free_requests(xi)));
    return FALSE;
  }

  /* linearize the packet's MDL chain into page-sized buffers from the freelist */
  for (page_num = 0, in_remaining = 0; page_num < pages_required; page_num++)
  {
    pi.mdls[page_num] = XenFreelist_GetPage(&xi->tx_freelist);
    out_buffer = MmGetMdlVirtualAddress(pi.mdls[page_num]);
    out_remaining = (USHORT)min(PAGE_SIZE, total_packet_length - page_num * PAGE_SIZE);
    NdisAdjustBufferLength(pi.mdls[page_num], out_remaining);
    while (out_remaining > 0)
    {
      if (!in_remaining)
      {
        ASSERT(in_mdl);
        in_buffer = MmGetSystemAddressForMdlSafe(in_mdl, LowPagePriority);
        ASSERT(in_buffer != NULL);
        in_remaining = (USHORT)MmGetMdlByteCount(in_mdl);
      }
      copied = min(in_remaining, out_remaining);
      memcpy(out_buffer, in_buffer, copied);
      in_remaining = in_remaining - copied;
      in_buffer += copied;
      out_remaining = out_remaining - copied;
      out_buffer += copied;
      if (!in_remaining)
        in_mdl = in_mdl->Next;
    }
  }
  ASSERT(!in_mdl);

  if (csum_info->Transmit.NdisPacketTcpChecksum
    || csum_info->Transmit.NdisPacketUdpChecksum)
  {
    flags |= NETTXF_csum_blank | NETTXF_data_validated;
    PC_INC(ProfCount_TxPacketsCsumOffload);
  }

  if (ndis_lso)
  {
    XenNet_ParsePacketHeader(&pi);
    XenNet_SumIpHeader(MmGetSystemAddressForMdlSafe(pi.mdls[0], NormalPagePriority), pi.ip4_header_length);
    flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used */
    if (pi.tcp_length >= mss)
    {
      flags |= NETTXF_extra_info;
      xen_gso = TRUE;
    }
  }

  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */

  /* (A) */
  tx = XenNet_PutOnTxRing(xi, pi.mdls[0], flags);
  tx->size = (USHORT)total_packet_length;
  xi->tx.req_prod_pvt++;

  /* (B) */
  if (xen_gso)
  {
    ASSERT(flags & NETTXF_extra_info);
    get_no_id_from_freelist(xi);
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT)mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;

    xi->tx.req_prod_pvt++;
  }

  /* (C) */
  for (page_num = 1; page_num < pages_required; page_num++)
  {
    tx = XenNet_PutOnTxRing(xi, pi.mdls[page_num], NETTXF_more_data);
    xi->tx.req_prod_pvt++;
  }

  /* only set the packet on the last buffer, clear more_data */
  xi->tx_pkts[tx->id] = packet;
  tx->flags &= ~NETTXF_more_data;

  if (ndis_lso)
  {
    NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(pi.tcp_length);
  }

  return TRUE;
}
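
/*
 * Send path overview: XenNet_SendPackets queues incoming NDIS packets on
 * tx_waiting_pkt_list and calls XenNet_SendQueuedPackets, which moves as
 * many packets as will fit onto the shared ring via XenNet_HWSendPacket.
 * XenNet_TxBufferGC harvests completions and drains the queue again, since
 * each completed response frees ring slots.
 */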

/* Called at DISPATCH_LEVEL with tx_lock held */
static VOID
XenNet_SendQueuedPackets(struct xennet_info *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  int notify;
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
#endif
  int cycles = 0;
  BOOLEAN success;

#if defined(XEN_PROFILE)
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  /* if the list is empty, RemoveHeadList returns the list head itself, not NULL */
  while (entry != &xi->tx_waiting_pkt_list)
  {
    ASSERT(cycles++ < 65536);
    //KdPrint((__DRIVER_NAME " Packet ready to send\n"));
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    success = XenNet_HWSendPacket(xi, packet);
    if (!success)
    {
      /* ring full - put the packet back and retry after the next GC */
      InsertHeadList(&xi->tx_waiting_pkt_list, entry);
      break;
    }
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx, notify);
  if (notify)
  {
    xi->XenInterface.EvtChn_Notify(xi->XenInterface.InterfaceHeader.Context,
      xi->event_channel);
  }

#if defined(XEN_PROFILE)
  ProfTime_SendQueuedPackets.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_SendQueuedPackets++;
#endif
}
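
/*
 * Reclaim completed tx requests from the ring: return ids and pages to
 * their freelists, then complete the associated NDIS packets. Presumably
 * run from the event channel DPC when the backend posts responses.
 */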
// Called at DISPATCH_LEVEL
NDIS_STATUS
XenNet_TxBufferGC(struct xennet_info *xi)
{
  RING_IDX cons, prod;
  unsigned short id;
  PNDIS_PACKET packets[NET_TX_RING_SIZE];
  ULONG packet_count = 0;
  int moretodo;
  ULONG i;
  int cycles = 0;
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
#endif

  ASSERT(xi->connected);
  ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

//  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));

#if defined(XEN_PROFILE)
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  do {
    ASSERT(cycles++ < 65536);
    prod = xi->tx.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;

      ASSERT(cycles++ < 65536);

      txrsp = RING_GET_RESPONSE(&xi->tx, cons);
      if (txrsp->status == NETIF_RSP_NULL)
      {
//        KdPrint((__DRIVER_NAME " NETIF_RSP_NULL\n"));
        put_no_id_on_freelist(xi);
        continue; // This would be the response to an extra_info request
      }

      id = txrsp->id;

      packets[packet_count] = xi->tx_pkts[id];
      if (packets[packet_count])
      {
        xi->tx_pkts[id] = NULL;
        packet_count++;
        xi->stat_tx_ok++;
      }
      if (xi->tx_mdls[id])
      {
        NdisAdjustBufferLength(xi->tx_mdls[id], PAGE_SIZE);
        XenFreelist_PutPage(&xi->tx_freelist, xi->tx_mdls[id]);
        xi->tx_mdls[id] = NULL;
      }
      put_id_on_freelist(xi, id);
    }

    xi->tx.rsp_cons = prod;

    RING_FINAL_CHECK_FOR_RESPONSES(&xi->tx, moretodo);
  } while (moretodo);

  /* if queued packets, send them now */
  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  for (i = 0; i < packet_count; i++)
  {
    /* A miniport driver must release any spin lock that it is holding before
       calling NdisMSendComplete. */
    NdisMSendComplete(xi->adapter_handle, packets[i], NDIS_STATUS_SUCCESS);
  }

//  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));

#if defined(XEN_PROFILE)
  ProfTime_TxBufferGC.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_TxBufferGC++;
#endif

  return NDIS_STATUS_SUCCESS;
}
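
/*
 * NDIS MiniportSendPackets handler. Packets are not copied here; each one
 * is chained through its MiniportReservedEx area (a LIST_ENTRY stored just
 * past the first PVOID) and the queue is then drained under the tx lock.
 */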
VOID
XenNet_SendPackets(
  IN NDIS_HANDLE MiniportAdapterContext,
  IN PPNDIS_PACKET PacketArray,
  IN UINT NumberOfPackets
  )
{
  struct xennet_info *xi = MiniportAdapterContext;
  PNDIS_PACKET packet;
  UINT i;
  PLIST_ENTRY entry;
  KIRQL OldIrql;
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
  KIRQL OldIrql2;
#endif

#if defined(XEN_PROFILE)
  KeRaiseIrql(DISPATCH_LEVEL, &OldIrql2);
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);

//  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
  for (i = 0; i < NumberOfPackets; i++)
  {
    packet = PacketArray[i];
    ASSERT(packet);
    *(ULONG *)&packet->MiniportReservedEx = 0;
    entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
    InsertTailList(&xi->tx_waiting_pkt_list, entry);
#if defined(XEN_PROFILE)
    ProfCount_PacketsPerSendPackets++;
#endif
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

#if defined(XEN_PROFILE)
  ProfTime_SendPackets.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_SendPackets++;
  KeLowerIrql(OldIrql2);
#endif

#if defined(XEN_PROFILE)
  /* dump averaged profile counters every 1024 calls */
  if ((ProfCount_SendPackets & 1023) == 0)
  {
    KdPrint((__DRIVER_NAME " ***\n"));
    KdPrint((__DRIVER_NAME " RxBufferAlloc Count = %10d, Avg Time = %10ld\n", ProfCount_RxBufferAlloc, (ProfCount_RxBufferAlloc == 0)?0:(ProfTime_RxBufferAlloc.QuadPart / ProfCount_RxBufferAlloc)));
    KdPrint((__DRIVER_NAME " ReturnPacket Count = %10d, Avg Time = %10ld\n", ProfCount_ReturnPacket, (ProfCount_ReturnPacket == 0)?0:(ProfTime_ReturnPacket.QuadPart / ProfCount_ReturnPacket)));
    KdPrint((__DRIVER_NAME " RxBufferCheck Count = %10d, Avg Time = %10ld\n", ProfCount_RxBufferCheck, (ProfCount_RxBufferCheck == 0)?0:(ProfTime_RxBufferCheck.QuadPart / ProfCount_RxBufferCheck)));
    KdPrint((__DRIVER_NAME " RxBufferCheckTop Avg Time = %10ld\n", (ProfCount_RxBufferCheck == 0)?0:(ProfTime_RxBufferCheckTopHalf.QuadPart / ProfCount_RxBufferCheck)));
    KdPrint((__DRIVER_NAME " RxBufferCheckBot Avg Time = %10ld\n", (ProfCount_RxBufferCheck == 0)?0:(ProfTime_RxBufferCheckBotHalf.QuadPart / ProfCount_RxBufferCheck)));
    KdPrint((__DRIVER_NAME " Linearize Count = %10d, Avg Time = %10ld\n", ProfCount_Linearize, (ProfCount_Linearize == 0)?0:(ProfTime_Linearize.QuadPart / ProfCount_Linearize)));
    KdPrint((__DRIVER_NAME " SendPackets Count = %10d, Avg Time = %10ld\n", ProfCount_SendPackets, (ProfCount_SendPackets == 0)?0:(ProfTime_SendPackets.QuadPart / ProfCount_SendPackets)));
    KdPrint((__DRIVER_NAME " Packets per SendPackets = %10d\n", (ProfCount_SendPackets == 0)?0:(ProfCount_PacketsPerSendPackets / ProfCount_SendPackets)));
    KdPrint((__DRIVER_NAME " SendQueuedPackets Count = %10d, Avg Time = %10ld\n", ProfCount_SendQueuedPackets, (ProfCount_SendQueuedPackets == 0)?0:(ProfTime_SendQueuedPackets.QuadPart / ProfCount_SendQueuedPackets)));
    KdPrint((__DRIVER_NAME " TxBufferGC Count = %10d, Avg Time = %10ld\n", ProfCount_TxBufferGC, (ProfCount_TxBufferGC == 0)?0:(ProfTime_TxBufferGC.QuadPart / ProfCount_TxBufferGC)));
    KdPrint((__DRIVER_NAME " RxPackets Total = %10d, Csum Offload = %10d, Calls To Receive = %10d\n", ProfCount_RxPacketsTotal, ProfCount_RxPacketsCsumOffload, ProfCount_CallsToIndicateReceive));
    KdPrint((__DRIVER_NAME " TxPackets Total = %10d, Csum Offload = %10d, Large Offload = %10d\n", ProfCount_TxPacketsTotal, ProfCount_TxPacketsCsumOffload, ProfCount_TxPacketsLargeOffload));
  }
#endif
//  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
}
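
/*
 * Allocate the shared tx ring page, grant the backend access to it, and
 * prime the id freelist with every ring entry.
 */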
BOOLEAN
XenNet_TxInit(xennet_info_t *xi)
{
  USHORT i;

  KeInitializeSpinLock(&xi->tx_lock);

  xi->tx_mdl = AllocatePage();
  xi->tx_pgs = MmGetMdlVirtualAddress(xi->tx_mdl);
  SHARED_RING_INIT(xi->tx_pgs);
  FRONT_RING_INIT(&xi->tx, xi->tx_pgs, PAGE_SIZE);
  xi->tx_ring_ref = xi->XenInterface.GntTbl_GrantAccess(
    xi->XenInterface.InterfaceHeader.Context, 0,
    *MmGetMdlPfnArray(xi->tx_mdl), FALSE, 0);
  xi->tx_id_free = 0;
  xi->tx_no_id_used = 0;
  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    put_id_on_freelist(xi, i);
  }

  XenFreelist_Init(xi, &xi->tx_freelist, &xi->tx_lock);

  return TRUE;
}

/*
The ring is completely closed down now. We just need to empty anything left
on our freelists and harvest anything left on the rings. The freelist timer
will still be running, though.
*/

BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  PMDL mdl;
  ULONG i;
  KIRQL OldIrql;

  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));

  ASSERT(!xi->connected);

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);

  /* fail any packets still waiting in the tx queue */
  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  /* fail sent-but-not-completed packets and release their pages */
  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    packet = xi->tx_pkts[i];
    if (packet)
      NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    mdl = xi->tx_mdls[i];
    if (mdl)
      XenFreelist_PutPage(&xi->tx_freelist, xi->tx_mdls[i]);
  }

  XenFreelist_Dispose(&xi->tx_freelist);

  /* free TX resources */
  ASSERT(xi->XenInterface.GntTbl_EndAccess(
    xi->XenInterface.InterfaceHeader.Context, xi->tx_ring_ref, 0));
  FreePages(xi->tx_mdl);
  xi->tx_pgs = NULL;

  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));

  return TRUE;
}