win-pvdrivers

view xennet/xennet_tx.c @ 370:d76831a77d19

merge
author Andy Grover <andy.grover@oracle.com>
date Wed Jul 09 00:26:05 2008 -0700 (2008-07-09)
parents 5a762fd1fba9
children 8e10579159a0
line source
1 /*
2 PV Net Driver for Windows Xen HVM Domains
3 Copyright (C) 2007 James Harper
4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
21 #include "xennet.h"
23 #define FREELIST_ID_ERROR 0xFFFF
#ifdef XEN_PROFILE
/* Profiling counter increment; argument fully parenthesized so any
 * lvalue expression (e.g. *p, a.b) expands and binds correctly. */
#define PC_INC(var) ((var)++)
#else
/* Profiling disabled: expands to nothing (statement becomes a bare ';'). */
#define PC_INC(var)
#endif
31 static ULONG
32 free_requests(struct xennet_info *xi)
33 {
34 return xi->tx_id_free - xi->tx_no_id_used;
35 }
37 static USHORT
38 get_id_from_freelist(struct xennet_info *xi)
39 {
40 if (xi->tx_id_free - xi->tx_no_id_used == 0)
41 {
42 KdPrint((__DRIVER_NAME " Out of id's\n"));
43 return FREELIST_ID_ERROR;
44 }
45 xi->tx_id_free--;
47 return xi->tx_id_list[xi->tx_id_free];
48 }
50 static USHORT
51 get_no_id_from_freelist(struct xennet_info *xi)
52 {
53 if (xi->tx_id_free - xi->tx_no_id_used == 0)
54 {
55 KdPrint((__DRIVER_NAME " Out of no_id's\n"));
56 return FREELIST_ID_ERROR;
57 }
58 xi->tx_no_id_used++;
59 return 0;
60 }
62 static VOID
63 put_id_on_freelist(struct xennet_info *xi, USHORT id)
64 {
65 xi->tx_id_list[xi->tx_id_free] = id;
66 xi->tx_id_free++;
67 }
69 static VOID
70 put_no_id_on_freelist(struct xennet_info *xi)
71 {
72 xi->tx_no_id_used--;
73 }
/* Byte-swap a 16-bit value. The argument is fully parenthesized so that
 * expression arguments expand correctly under C operator precedence
 * (note: x is still evaluated twice - do not pass side-effecting args). */
#define SWAP_USHORT(x) ((USHORT)((((x) & 0xFF) << 8) | (((x) >> 8) & 0xFF)))
76 /* Place a buffer on tx ring. */
77 static struct netif_tx_request*
78 XenNet_PutOnTxRing(
79 struct xennet_info *xi,
80 PMDL mdl,
81 uint16_t flags)
82 {
83 struct netif_tx_request *tx;
85 unsigned short id;
87 id = get_id_from_freelist(xi);
88 ASSERT(id != FREELIST_ID_ERROR);
89 ASSERT(xi->tx_pkts[id] == NULL);
90 tx = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
91 tx->gref = get_grant_ref(mdl);
92 xi->tx_mdls[id] = mdl;
93 tx->id = id;
94 tx->offset = 0;
95 tx->size = (USHORT)MmGetMdlByteCount(mdl);
96 tx->flags = flags;
97 PC_INC(ProfCount_TxPacketsTotal);
99 return tx;
100 }
102 /* Called at DISPATCH_LEVEL with tx_lock held */
103 /*
104 * Send one NDIS_PACKET. This may involve multiple entries on TX ring.
105 */
106 static BOOLEAN
107 XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet)
108 {
109 struct netif_tx_request *tx = NULL;
110 struct netif_extra_info *ei = NULL;
111 PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
112 UINT total_packet_length;
113 ULONG mss;
114 PMDL in_mdl;
115 PUCHAR in_buffer = NULL;
116 PUCHAR out_buffer;
117 USHORT in_remaining;
118 USHORT out_remaining;
119 uint16_t flags = NETTXF_more_data;
120 packet_info_t pi;
121 BOOLEAN ndis_lso = FALSE;
122 BOOLEAN xen_gso = FALSE;
123 int pages_required;
124 int page_num;
125 USHORT copied;
127 #if defined(XEN_PROFILE)
128 LARGE_INTEGER tsc, dummy;
130 tsc = KeQueryPerformanceCounter(&dummy);
131 #endif
133 RtlZeroMemory(&pi, sizeof(pi));
135 csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
136 packet, TcpIpChecksumPacketInfo);
137 mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));
139 if (mss)
140 ndis_lso = TRUE;
142 NdisQueryPacket(packet, NULL, NULL, &in_mdl, &total_packet_length);
144 pages_required = (total_packet_length + PAGE_SIZE - 1) / PAGE_SIZE;
146 if (pages_required + !!ndis_lso > (int)free_requests(xi))
147 {
148 KdPrint((__DRIVER_NAME " Full on send - required = %d, available = %d\n", pages_required + !!ndis_lso, (int)free_requests(xi)));
149 return FALSE;
150 }
152 for (page_num = 0, in_remaining = 0; page_num < pages_required; page_num++)
153 {
154 pi.mdls[page_num] = XenFreelist_GetPage(&xi->tx_freelist);
155 out_buffer = MmGetMdlVirtualAddress(pi.mdls[page_num]);
156 out_remaining = (USHORT)min(PAGE_SIZE, total_packet_length - page_num * PAGE_SIZE);
157 NdisAdjustBufferLength(pi.mdls[page_num], out_remaining);
158 while (out_remaining > 0)
159 {
160 if (!in_remaining)
161 {
162 ASSERT(in_mdl);
163 in_buffer = MmGetSystemAddressForMdlSafe(in_mdl, LowPagePriority);
164 ASSERT(in_buffer != NULL);
165 in_remaining = (USHORT)MmGetMdlByteCount(in_mdl);
166 }
167 copied = min(in_remaining, out_remaining);
168 memcpy(out_buffer, in_buffer, copied);
169 in_remaining = in_remaining - copied;
170 in_buffer += copied;
171 out_remaining = out_remaining - copied;
172 out_buffer += copied;
173 if (!in_remaining)
174 in_mdl = in_mdl->Next;
175 }
176 }
177 ASSERT(!in_mdl);
179 if (csum_info->Transmit.NdisPacketTcpChecksum
180 || csum_info->Transmit.NdisPacketUdpChecksum)
181 {
182 flags |= NETTXF_csum_blank | NETTXF_data_validated;
183 PC_INC(ProfCount_TxPacketsCsumOffload);
184 }
186 if (ndis_lso)
187 {
188 XenNet_ParsePacketHeader(&pi);
189 XenNet_SumIpHeader(MmGetSystemAddressForMdlSafe(pi.mdls[0], NormalPagePriority), pi.ip4_header_length);
190 flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used*/
191 if (pi.tcp_length >= mss)
192 {
193 flags |= NETTXF_extra_info;
194 xen_gso = TRUE;
195 }
196 }
198 /*
199 * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
200 * (C) rest of requests on the ring. Only (A) has csum flags.
201 */
203 /* (A) */
204 tx = XenNet_PutOnTxRing(xi, pi.mdls[0], flags);
205 tx->size = (USHORT)total_packet_length;
206 xi->tx.req_prod_pvt++;
208 /* (B) */
209 if (xen_gso)
210 {
211 ASSERT(flags & NETTXF_extra_info);
212 get_no_id_from_freelist(xi);
213 ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
214 ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
215 ei->flags = 0;
216 ei->u.gso.size = (USHORT)mss;
217 ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
218 ei->u.gso.pad = 0;
219 ei->u.gso.features = 0;
221 xi->tx.req_prod_pvt++;
222 }
224 /* (C) */
225 for (page_num = 1; page_num < pages_required; page_num++)
226 {
227 tx = XenNet_PutOnTxRing(xi, pi.mdls[page_num], NETTXF_more_data);
228 xi->tx.req_prod_pvt++;
229 }
231 /* only set the packet on the last buffer, clear more_data */
232 xi->tx_pkts[tx->id] = packet;
233 tx->flags &= ~NETTXF_more_data;
235 if (ndis_lso)
236 {
237 NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(pi.tcp_length);
238 }
240 return TRUE;
241 }
243 /* Called at DISPATCH_LEVEL with tx_lock held */
245 static VOID
246 XenNet_SendQueuedPackets(struct xennet_info *xi)
247 {
248 PLIST_ENTRY entry;
249 PNDIS_PACKET packet;
250 int notify;
251 BOOLEAN success;
253 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
254 /* if empty, the above returns head*, not NULL */
255 while (entry != &xi->tx_waiting_pkt_list)
256 {
257 //KdPrint((__DRIVER_NAME " Packet ready to send\n"));
258 packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
259 success = XenNet_HWSendPacket(xi, packet);
260 if (!success)
261 {
262 InsertHeadList(&xi->tx_waiting_pkt_list, entry);
263 break;
264 }
265 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
266 }
268 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx, notify);
269 if (notify)
270 {
271 xi->vectors.EvtChn_Notify(xi->vectors.context, xi->event_channel);
272 }
273 }
275 // Called at DISPATCH_LEVEL
276 NDIS_STATUS
277 XenNet_TxBufferGC(struct xennet_info *xi)
278 {
279 RING_IDX cons, prod;
280 unsigned short id;
281 PNDIS_PACKET packets[NET_TX_RING_SIZE];
282 ULONG packet_count = 0;
283 int moretodo;
284 ULONG i;
286 ASSERT(xi->connected);
287 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
289 // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
292 KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
294 do {
295 prod = xi->tx.sring->rsp_prod;
296 KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */
298 for (cons = xi->tx.rsp_cons; cons != prod; cons++)
299 {
300 struct netif_tx_response *txrsp;
302 txrsp = RING_GET_RESPONSE(&xi->tx, cons);
303 if (txrsp->status == NETIF_RSP_NULL)
304 {
305 // KdPrint((__DRIVER_NAME " NETIF_RSP_NULL\n"));
306 put_no_id_on_freelist(xi);
307 continue; // This would be the response to an extra_info packet
308 }
310 id = txrsp->id;
312 packets[packet_count] = xi->tx_pkts[id];
313 if (packets[packet_count])
314 {
315 xi->tx_pkts[id] = NULL;
316 packet_count++;
317 xi->stat_tx_ok++;
318 }
319 if (xi->tx_mdls[id])
320 {
321 NdisAdjustBufferLength(xi->tx_mdls[id], PAGE_SIZE);
322 XenFreelist_PutPage(&xi->tx_freelist, xi->tx_mdls[id]);
323 xi->tx_mdls[id] = NULL;
324 }
325 put_id_on_freelist(xi, id);
326 }
328 xi->tx.rsp_cons = prod;
330 RING_FINAL_CHECK_FOR_RESPONSES(&xi->tx, moretodo);
331 } while (moretodo);
333 /* if queued packets, send them now */
334 XenNet_SendQueuedPackets(xi);
336 KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
338 // if (packet_count)
339 // KdPrint((__DRIVER_NAME " --- " __FUNCTION__ " %d packets completed\n"));
340 for (i = 0; i < packet_count; i++)
341 {
342 /* A miniport driver must release any spin lock that it is holding before
343 calling NdisMSendComplete. */
344 NdisMSendComplete(xi->adapter_handle, packets[i], NDIS_STATUS_SUCCESS);
345 }
347 // KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
349 return NDIS_STATUS_SUCCESS;
350 }
352 // called at <= DISPATCH_LEVEL
353 VOID DDKAPI
354 XenNet_SendPackets(
355 IN NDIS_HANDLE MiniportAdapterContext,
356 IN PPNDIS_PACKET PacketArray,
357 IN UINT NumberOfPackets
358 )
359 {
360 struct xennet_info *xi = MiniportAdapterContext;
361 PNDIS_PACKET packet;
362 UINT i;
363 PLIST_ENTRY entry;
364 KIRQL OldIrql;
366 KeAcquireSpinLock(&xi->tx_lock, &OldIrql);
368 // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ " (packets = %d, free_requests = %d)\n", NumberOfPackets, free_requests(xi)));
369 for (i = 0; i < NumberOfPackets; i++)
370 {
371 packet = PacketArray[i];
372 ASSERT(packet);
373 *(ULONG *)&packet->MiniportReservedEx = 0;
374 entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
375 InsertTailList(&xi->tx_waiting_pkt_list, entry);
376 }
378 if (xi->device_state->resume_state == RESUME_STATE_RUNNING)
379 XenNet_SendQueuedPackets(xi);
381 KeReleaseSpinLock(&xi->tx_lock, OldIrql);
383 // KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
384 }
386 VOID
387 XenNet_TxResumeStart(xennet_info_t *xi)
388 {
389 int i;
390 KIRQL old_irql;
391 PLIST_ENTRY entry;
393 KeAcquireSpinLock(&xi->tx_lock, &old_irql);
394 for (i = 0; i < NET_TX_RING_SIZE; i++)
395 {
396 if (xi->tx_mdls[i])
397 {
398 XenFreelist_PutPage(&xi->tx_freelist, xi->tx_mdls[i]);
399 xi->tx_mdls[i] = NULL;
400 }
401 /* this may result in packets being sent out of order... I don't think it matters though */
402 if (xi->tx_pkts[i])
403 {
404 *(ULONG *)&xi->tx_pkts[i]->MiniportReservedEx = 0;
405 entry = (PLIST_ENTRY)&xi->tx_pkts[i]->MiniportReservedEx[sizeof(PVOID)];
406 InsertTailList(&xi->tx_waiting_pkt_list, entry);
407 xi->tx_pkts[i] = 0;
408 }
409 }
410 XenFreelist_ResumeStart(&xi->tx_freelist);
411 xi->tx_id_free = 0;
412 xi->tx_no_id_used = 0;
413 for (i = 0; i < NET_TX_RING_SIZE; i++)
414 put_id_on_freelist(xi, (USHORT)i);
415 KeReleaseSpinLock(&xi->tx_lock, old_irql);
416 }
418 VOID
419 XenNet_TxResumeEnd(xennet_info_t *xi)
420 {
421 KIRQL old_irql;
423 KeAcquireSpinLock(&xi->tx_lock, &old_irql);
424 XenFreelist_ResumeEnd(&xi->tx_freelist);
425 XenNet_SendQueuedPackets(xi);
426 KeReleaseSpinLock(&xi->tx_lock, old_irql);
427 }
429 BOOLEAN
430 XenNet_TxInit(xennet_info_t *xi)
431 {
432 USHORT i;
434 KeInitializeSpinLock(&xi->tx_lock);
436 xi->tx_id_free = 0;
437 xi->tx_no_id_used = 0;
438 for (i = 0; i < NET_TX_RING_SIZE; i++)
439 {
440 put_id_on_freelist(xi, i);
441 }
443 XenFreelist_Init(xi, &xi->tx_freelist, &xi->tx_lock);
445 return TRUE;
446 }
448 /*
449 The ring is completely closed down now. We just need to empty anything left
450 on our freelists and harvest anything left on the rings. The freelist timer
451 will still be running though.
452 */
454 BOOLEAN
455 XenNet_TxShutdown(xennet_info_t *xi)
456 {
457 PLIST_ENTRY entry;
458 PNDIS_PACKET packet;
459 PMDL mdl;
460 ULONG i;
461 KIRQL OldIrql;
463 KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
465 ASSERT(!xi->connected);
467 KeAcquireSpinLock(&xi->tx_lock, &OldIrql);
469 /* Free packets in tx queue */
470 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
471 while (entry != &xi->tx_waiting_pkt_list)
472 {
473 packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
474 NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
475 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
476 }
478 /* free sent-but-not-completed packets */
479 for (i = 0; i < NET_TX_RING_SIZE; i++)
480 {
481 packet = xi->tx_pkts[i];
482 if (packet)
483 NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
484 mdl = xi->tx_mdls[i];
485 if (mdl)
486 XenFreelist_PutPage(&xi->tx_freelist, xi->tx_mdls[i]);
487 }
489 XenFreelist_Dispose(&xi->tx_freelist);
491 KeReleaseSpinLock(&xi->tx_lock, OldIrql);
493 KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
495 return TRUE;
496 }