win-pvdrivers

xennet/xennet_tx.c @ 265:8fef16f8fc08

Fix warnings on x64 build. Xen apparently limits PFNs to 32 bits, so make this limitation stand out a little more.
author Andy Grover <andy.grover@oracle.com>
date Mon May 05 13:24:03 2008 -0700 (2008-05-05)
/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "xennet.h"

#define FREELIST_ID_ERROR 0xFFFF

#ifdef XEN_PROFILE
#define PC_INC(var) var++
#else
#define PC_INC(var)
#endif
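/* Number of tx ring requests we can still queue: free ids on the freelist
   less the slots already reserved for id-less (extra_info) requests. */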
static ULONG
free_requests(struct xennet_info *xi)
{
  return xi->tx_id_free - xi->tx_no_id_used;
}
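/* Pop a free tx id off the freelist; returns FREELIST_ID_ERROR if none are left. */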
static USHORT
get_id_from_freelist(struct xennet_info *xi)
{
  if (xi->tx_id_free - xi->tx_no_id_used == 0)
  {
    KdPrint((__DRIVER_NAME " Out of id's\n"));
    return FREELIST_ID_ERROR;
  }
  xi->tx_id_free--;

  return xi->tx_id_list[xi->tx_id_free];
}
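/* Reserve a tx ring slot for a request that carries no id (such as an
   extra_info entry); returns FREELIST_ID_ERROR if no slots are available. */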
static USHORT
get_no_id_from_freelist(struct xennet_info *xi)
{
  if (xi->tx_id_free - xi->tx_no_id_used == 0)
  {
    KdPrint((__DRIVER_NAME " Out of no_id's\n"));
    return FREELIST_ID_ERROR;
  }
  xi->tx_no_id_used++;
  return 0;
}
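/* Return a tx id to the freelist once its request has been completed. */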
static VOID
put_id_on_freelist(struct xennet_info *xi, USHORT id)
{
  xi->tx_id_list[xi->tx_id_free] = id;
  xi->tx_id_free++;
}
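/* Release a slot previously reserved with get_no_id_from_freelist. */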
static VOID
put_no_id_on_freelist(struct xennet_info *xi)
{
  xi->tx_no_id_used--;
}

#define SWAP_USHORT(x) (USHORT)((((x & 0xFF) << 8)|((x >> 8) & 0xFF)))
/* Place a buffer on tx ring. */
static struct netif_tx_request*
XenNet_PutOnTxRing(
  struct xennet_info *xi,
  PMDL mdl,
  uint16_t flags)
{
  struct netif_tx_request *tx;
  unsigned short id;

  id = get_id_from_freelist(xi);
  ASSERT(id != FREELIST_ID_ERROR);
  ASSERT(xi->tx_pkts[id] == NULL);
  tx = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
  tx->gref = get_grant_ref(mdl);
  xi->tx_mdls[id] = mdl;
  tx->id = id;
  tx->offset = 0;
  tx->size = (USHORT)MmGetMdlByteCount(mdl);
  tx->flags = flags;
  PC_INC(ProfCount_TxPacketsTotal);

  return tx;
}
/* Called at DISPATCH_LEVEL with tx_lock held */
/*
 * Send one NDIS_PACKET. This may involve multiple entries on TX ring.
 */
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet)
{
  struct netif_tx_request *tx = NULL;
  struct netif_extra_info *ei = NULL;
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  UINT total_packet_length;
  ULONG mss;
  PMDL in_mdl;
  PUCHAR in_buffer = NULL;
  PUCHAR out_buffer;
  USHORT in_remaining;
  USHORT out_remaining;
  uint16_t flags = NETTXF_more_data;
  packet_info_t pi;
  BOOLEAN ndis_lso = FALSE;
  BOOLEAN xen_gso = FALSE;
  int pages_required;
  int page_num;
  USHORT copied;

#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;

  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  RtlZeroMemory(&pi, sizeof(pi));

  csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
    packet, TcpIpChecksumPacketInfo);
  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));

  if (mss)
    ndis_lso = TRUE;

  NdisQueryPacket(packet, NULL, NULL, &in_mdl, &total_packet_length);

  pages_required = (total_packet_length + PAGE_SIZE - 1) / PAGE_SIZE;

  if (pages_required + !!ndis_lso > (int)free_requests(xi))
  {
    KdPrint((__DRIVER_NAME " Full on send - required = %d, available = %d\n", pages_required + !!ndis_lso, (int)free_requests(xi)));
    return FALSE;
  }
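  /* Copy the packet's MDL chain into locally owned pages from the tx freelist,
     one page (and therefore one tx request) at a time. */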
  for (page_num = 0, in_remaining = 0; page_num < pages_required; page_num++)
  {
    pi.mdls[page_num] = XenFreelist_GetPage(&xi->tx_freelist);
    out_buffer = MmGetMdlVirtualAddress(pi.mdls[page_num]);
    out_remaining = (USHORT)min(PAGE_SIZE, total_packet_length - page_num * PAGE_SIZE);
    NdisAdjustBufferLength(pi.mdls[page_num], out_remaining);
    while (out_remaining > 0)
    {
      if (!in_remaining)
      {
        ASSERT(in_mdl);
        in_buffer = MmGetSystemAddressForMdlSafe(in_mdl, LowPagePriority);
        ASSERT(in_buffer != NULL);
        in_remaining = (USHORT)MmGetMdlByteCount(in_mdl);
      }
      copied = min(in_remaining, out_remaining);
      memcpy(out_buffer, in_buffer, copied);
      in_remaining = in_remaining - copied;
      in_buffer += copied;
      out_remaining = out_remaining - copied;
      out_buffer += copied;
      if (!in_remaining)
        in_mdl = in_mdl->Next;
    }
  }
  ASSERT(!in_mdl);
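  /* If NDIS requested TCP or UDP checksum offload, flag the request so the
     backend knows the checksum has not yet been computed. */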
  if (csum_info->Transmit.NdisPacketTcpChecksum
    || csum_info->Transmit.NdisPacketUdpChecksum)
  {
    flags |= NETTXF_csum_blank | NETTXF_data_validated;
    PC_INC(ProfCount_TxPacketsCsumOffload);
  }

  if (ndis_lso)
  {
    XenNet_ParsePacketHeader(&pi);
    XenNet_SumIpHeader(MmGetSystemAddressForMdlSafe(pi.mdls[0], NormalPagePriority), pi.ip4_header_length);
    flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used */
    if (pi.tcp_length >= mss)
    {
      flags |= NETTXF_extra_info;
      xen_gso = TRUE;
    }
  }

  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */

  /* (A) */
  tx = XenNet_PutOnTxRing(xi, pi.mdls[0], flags);
  tx->size = (USHORT)total_packet_length;
  xi->tx.req_prod_pvt++;

  /* (B) */
  if (xen_gso)
  {
    ASSERT(flags & NETTXF_extra_info);
    get_no_id_from_freelist(xi);
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT)mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;

    xi->tx.req_prod_pvt++;
  }

  /* (C) */
  for (page_num = 1; page_num < pages_required; page_num++)
  {
    tx = XenNet_PutOnTxRing(xi, pi.mdls[page_num], NETTXF_more_data);
    xi->tx.req_prod_pvt++;
  }

  /* only set the packet on the last buffer, clear more_data */
  xi->tx_pkts[tx->id] = packet;
  tx->flags &= ~NETTXF_more_data;

  if (ndis_lso)
  {
    NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(pi.tcp_length);
  }

  return TRUE;
}
/* Called at DISPATCH_LEVEL with tx_lock held */

static VOID
XenNet_SendQueuedPackets(struct xennet_info *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  int notify;
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
#endif

#if DBG
  int cycles = 0;
#endif
  BOOLEAN success;

#if defined(XEN_PROFILE)
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  /* if empty, the above returns head*, not NULL */
  while (entry != &xi->tx_waiting_pkt_list)
  {
    ASSERT(cycles++ < 65536);
    //KdPrint((__DRIVER_NAME " Packet ready to send\n"));
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    success = XenNet_HWSendPacket(xi, packet);
    if (!success)
    {
      InsertHeadList(&xi->tx_waiting_pkt_list, entry);
      break;
    }
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx, notify);
  if (notify)
  {
    xi->XenInterface.EvtChn_Notify(xi->XenInterface.InterfaceHeader.Context,
      xi->event_channel);
  }

#if defined(XEN_PROFILE)
  ProfTime_SendQueuedPackets.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_SendQueuedPackets++;
#endif
}
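/* Collect completed responses from the tx ring, return their ids and pages to
   the freelists, and complete the corresponding packets back to NDIS. */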
// Called at DISPATCH_LEVEL
NDIS_STATUS
XenNet_TxBufferGC(struct xennet_info *xi)
{
  RING_IDX cons, prod;
  unsigned short id;
  PNDIS_PACKET packets[NET_TX_RING_SIZE];
  ULONG packet_count = 0;
  int moretodo;
  ULONG i;
#if DBG
  int cycles = 0;
#endif
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
#endif

  ASSERT(xi->connected);
  ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

//  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));

#if defined(XEN_PROFILE)
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  do {
    ASSERT(cycles++ < 65536);
    prod = xi->tx.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;

      ASSERT(cycles++ < 65536);

      txrsp = RING_GET_RESPONSE(&xi->tx, cons);
      if (txrsp->status == NETIF_RSP_NULL)
      {
//        KdPrint((__DRIVER_NAME " NETIF_RSP_NULL\n"));
        put_no_id_on_freelist(xi);
        continue; // This would be the response to an extra_info packet
      }

      id = txrsp->id;

      packets[packet_count] = xi->tx_pkts[id];
      if (packets[packet_count])
      {
        xi->tx_pkts[id] = NULL;
        packet_count++;
        xi->stat_tx_ok++;
      }
      if (xi->tx_mdls[id])
      {
        NdisAdjustBufferLength(xi->tx_mdls[id], PAGE_SIZE);
        XenFreelist_PutPage(&xi->tx_freelist, xi->tx_mdls[id]);
        xi->tx_mdls[id] = NULL;
      }
      put_id_on_freelist(xi, id);
    }

    xi->tx.rsp_cons = prod;

    RING_FINAL_CHECK_FOR_RESPONSES(&xi->tx, moretodo);
  } while (moretodo);

  /* if queued packets, send them now */
  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  for (i = 0; i < packet_count; i++)
  {
    /* A miniport driver must release any spin lock that it is holding before
       calling NdisMSendComplete. */
    NdisMSendComplete(xi->adapter_handle, packets[i], NDIS_STATUS_SUCCESS);
  }

//  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));

#if defined(XEN_PROFILE)
  ProfTime_TxBufferGC.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_TxBufferGC++;
#endif

  return NDIS_STATUS_SUCCESS;
}
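/* NDIS MiniportSendPackets handler: queue the packets on tx_waiting_pkt_list
   and then try to put them on the ring immediately. */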
VOID
XenNet_SendPackets(
  IN NDIS_HANDLE MiniportAdapterContext,
  IN PPNDIS_PACKET PacketArray,
  IN UINT NumberOfPackets
  )
{
  struct xennet_info *xi = MiniportAdapterContext;
  PNDIS_PACKET packet;
  UINT i;
  PLIST_ENTRY entry;
  KIRQL OldIrql;
#if defined(XEN_PROFILE)
  LARGE_INTEGER tsc, dummy;
  KIRQL OldIrql2;
#endif

#if defined(XEN_PROFILE)
  KeRaiseIrql(DISPATCH_LEVEL, &OldIrql2);
  tsc = KeQueryPerformanceCounter(&dummy);
#endif

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);

//  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
  for (i = 0; i < NumberOfPackets; i++)
  {
    packet = PacketArray[i];
    ASSERT(packet);
    *(ULONG *)&packet->MiniportReservedEx = 0;
    entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
    InsertTailList(&xi->tx_waiting_pkt_list, entry);
#if defined(XEN_PROFILE)
    ProfCount_PacketsPerSendPackets++;
#endif
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

#if defined(XEN_PROFILE)
  ProfTime_SendPackets.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - tsc.QuadPart;
  ProfCount_SendPackets++;
  KeLowerIrql(OldIrql2);
#endif

#if defined(XEN_PROFILE)
  if ((ProfCount_SendPackets & 1023) == 0)
  {
    KdPrint((__DRIVER_NAME " ***\n"));
    KdPrint((__DRIVER_NAME " RxBufferAlloc Count = %10d, Avg Time = %10ld\n", ProfCount_RxBufferAlloc, (ProfCount_RxBufferAlloc == 0)?0:(ProfTime_RxBufferAlloc.QuadPart / ProfCount_RxBufferAlloc)));
    KdPrint((__DRIVER_NAME " ReturnPacket Count = %10d, Avg Time = %10ld\n", ProfCount_ReturnPacket, (ProfCount_ReturnPacket == 0)?0:(ProfTime_ReturnPacket.QuadPart / ProfCount_ReturnPacket)));
    KdPrint((__DRIVER_NAME " RxBufferCheck Count = %10d, Avg Time = %10ld\n", ProfCount_RxBufferCheck, (ProfCount_RxBufferCheck == 0)?0:(ProfTime_RxBufferCheck.QuadPart / ProfCount_RxBufferCheck)));
    KdPrint((__DRIVER_NAME " RxBufferCheckTop Avg Time = %10ld\n", (ProfCount_RxBufferCheck == 0)?0:(ProfTime_RxBufferCheckTopHalf.QuadPart / ProfCount_RxBufferCheck)));
    KdPrint((__DRIVER_NAME " RxBufferCheckBot Avg Time = %10ld\n", (ProfCount_RxBufferCheck == 0)?0:(ProfTime_RxBufferCheckBotHalf.QuadPart / ProfCount_RxBufferCheck)));
    KdPrint((__DRIVER_NAME " Linearize Count = %10d, Avg Time = %10ld\n", ProfCount_Linearize, (ProfCount_Linearize == 0)?0:(ProfTime_Linearize.QuadPart / ProfCount_Linearize)));
    KdPrint((__DRIVER_NAME " SendPackets Count = %10d, Avg Time = %10ld\n", ProfCount_SendPackets, (ProfCount_SendPackets == 0)?0:(ProfTime_SendPackets.QuadPart / ProfCount_SendPackets)));
    KdPrint((__DRIVER_NAME " Packets per SendPackets = %10d\n", (ProfCount_SendPackets == 0)?0:(ProfCount_PacketsPerSendPackets / ProfCount_SendPackets)));
    KdPrint((__DRIVER_NAME " SendQueuedPackets Count = %10d, Avg Time = %10ld\n", ProfCount_SendQueuedPackets, (ProfCount_SendQueuedPackets == 0)?0:(ProfTime_SendQueuedPackets.QuadPart / ProfCount_SendQueuedPackets)));
    KdPrint((__DRIVER_NAME " TxBufferGC Count = %10d, Avg Time = %10ld\n", ProfCount_TxBufferGC, (ProfCount_TxBufferGC == 0)?0:(ProfTime_TxBufferGC.QuadPart / ProfCount_TxBufferGC)));
    KdPrint((__DRIVER_NAME " RxPackets Total = %10d, Csum Offload = %10d, Calls To Receive = %10d\n", ProfCount_RxPacketsTotal, ProfCount_RxPacketsCsumOffload, ProfCount_CallsToIndicateReceive));
    KdPrint((__DRIVER_NAME " TxPackets Total = %10d, Csum Offload = %10d, Large Offload = %10d\n", ProfCount_TxPacketsTotal, ProfCount_TxPacketsCsumOffload, ProfCount_TxPacketsLargeOffload));
  }
#endif
//  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
}
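/* Set up the transmit side: allocate and grant the shared tx ring page,
   initialise the front ring, the id freelist and the tx page freelist. */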
BOOLEAN
XenNet_TxInit(xennet_info_t *xi)
{
  USHORT i;
  PFN_NUMBER pfn;

  KeInitializeSpinLock(&xi->tx_lock);

  xi->tx_mdl = AllocatePage();
  pfn = *MmGetMdlPfnArray(xi->tx_mdl);
  xi->tx_pgs = MmGetMdlVirtualAddress(xi->tx_mdl);
  SHARED_RING_INIT(xi->tx_pgs);
  FRONT_RING_INIT(&xi->tx, xi->tx_pgs, PAGE_SIZE);
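  /* Xen apparently limits PFNs to 32 bits, so the (64-bit on x64) PFN_NUMBER is
     deliberately truncated to uint32_t when handed to the grant table interface. */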
  xi->tx_ring_ref = xi->XenInterface.GntTbl_GrantAccess(
    xi->XenInterface.InterfaceHeader.Context, 0,
    (uint32_t)pfn, FALSE, 0);
  xi->tx_id_free = 0;
  xi->tx_no_id_used = 0;
  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    put_id_on_freelist(xi, i);
  }

  XenFreelist_Init(xi, &xi->tx_freelist, &xi->tx_lock);

  return TRUE;
}
/*
The ring is completely closed down now. We just need to empty anything left
on our freelists and harvest anything left on the rings. The freelist timer
will still be running though.
*/

BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  PMDL mdl;
  ULONG i;
  KIRQL OldIrql;

  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));

  ASSERT(!xi->connected);

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);

  /* Free packets in tx queue */
  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  /* free sent-but-not-completed packets */
  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    packet = xi->tx_pkts[i];
    if (packet)
      NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    mdl = xi->tx_mdls[i];
    if (mdl)
      XenFreelist_PutPage(&xi->tx_freelist, xi->tx_mdls[i]);
  }

  XenFreelist_Dispose(&xi->tx_freelist);

  /* free TX resources */
  ASSERT(xi->XenInterface.GntTbl_EndAccess(
    xi->XenInterface.InterfaceHeader.Context, xi->tx_ring_ref, 0));
  FreePages(xi->tx_mdl);
  xi->tx_pgs = NULL;

  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));

  return TRUE;
}