win-pvdrivers

view xennet/xennet_common.c @ 1033:cb767700f91c

Correctly initialise pi values and set header size based on lookahead
author James Harper <james.harper@bendigoit.com.au>
date Sun Mar 03 13:49:54 2013 +1100 (2013-03-03)
parents 1ce315b193d1
children fba0ce4d9e54
line source
1 /*
2 PV Net Driver for Windows Xen HVM Domains
3 Copyright (C) 2007 James Harper
4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
21 #include "xennet.h"
/* Increase the header to a certain size */
/*
 * Grow the contiguous header area of the packet described by pi to at
 * least new_header_size bytes, pulling bytes out of successive MDLs as
 * needed.
 *
 * header:          buffer holding the header so far; if NULL, pi->header
 *                  is used.
 * new_header_size: requested minimum header length; clamped to the
 *                  packet's total_length.
 *
 * Returns TRUE on success (pi->header_length is now >= the requested
 * size and pi->curr_mdl / pi->curr_mdl_offset / pi->curr_pb have been
 * advanced past the consumed bytes), FALSE if an MDL could not be
 * mapped or the chain ran out of data.
 */
BOOLEAN
XenNet_BuildHeader(packet_info_t *pi, PUCHAR header, ULONG new_header_size)
{
  ULONG bytes_remaining;

  //FUNCTION_ENTER();

  if (!header)
    header = pi->header;

  /* the header can never be larger than the packet itself */
  if (new_header_size > pi->total_length) {
    new_header_size = pi->total_length;
  }

  if (new_header_size <= pi->header_length) {
    //FUNCTION_EXIT();
    return TRUE; /* header is already at least the required size */
  }

  if (header == pi->first_mdl_virtual) {
    /* still working in the first buffer */
    if (new_header_size <= pi->first_mdl_length) {
      /* requested size still fits inside the first MDL - just extend */
      pi->header_length = new_header_size;
      if (pi->header_length == pi->first_mdl_length) {
        /* first MDL fully consumed - step to the next MDL/pb pair */
#if NTDDI_VERSION < NTDDI_VISTA
        NdisGetNextBuffer(pi->curr_mdl, &pi->curr_mdl);
#else
        NdisGetNextMdl(pi->curr_mdl, &pi->curr_mdl);
#endif
        pi->curr_mdl_offset = 0;
        if (pi->curr_pb)
          pi->curr_pb = pi->curr_pb->next;
      } else {
        pi->curr_mdl_offset = (USHORT)new_header_size;
      }
      //FUNCTION_EXIT();
      return TRUE;
    } else {
      /* header spans MDLs - stage it in the flat header_data buffer */
      memcpy(pi->header_data, header, pi->header_length);
      header = pi->header = pi->header_data;
    }
  }

  bytes_remaining = new_header_size - pi->header_length;
  // TODO: if there are only a small number of bytes left in the current buffer then increase to consume that too... it would have to be no more than the size of header+mss though

  /* copy from successive MDLs until the header reaches the required length */
  while (bytes_remaining && pi->curr_mdl) {
    ULONG copy_size;

    XN_ASSERT(pi->curr_mdl);
    if (MmGetMdlByteCount(pi->curr_mdl)) {
      PUCHAR src_addr;
      src_addr = MmGetSystemAddressForMdlSafe(pi->curr_mdl, NormalPagePriority);
      if (!src_addr) {
        /* mapping failed (low resources) - caller must treat packet as bad */
        //FUNCTION_EXIT();
        return FALSE;
      }
      copy_size = min(bytes_remaining, MmGetMdlByteCount(pi->curr_mdl) - pi->curr_mdl_offset);
      memcpy(header + pi->header_length,
        src_addr + pi->curr_mdl_offset, copy_size);
      pi->curr_mdl_offset = (USHORT)(pi->curr_mdl_offset + copy_size);
      pi->header_length += copy_size;
      bytes_remaining -= copy_size;
    }
    if (pi->curr_mdl_offset == MmGetMdlByteCount(pi->curr_mdl)) {
      /* current MDL exhausted - advance the MDL/pb cursors together */
#if NTDDI_VERSION < NTDDI_VISTA
      NdisGetNextBuffer(pi->curr_mdl, &pi->curr_mdl);
#else
      NdisGetNextMdl(pi->curr_mdl, &pi->curr_mdl);
#endif
      if (pi->curr_pb)
        pi->curr_pb = pi->curr_pb->next;
      pi->curr_mdl_offset = 0;
    }
  }
  if (bytes_remaining) {
    /* ran out of MDLs before reaching the requested size */
    //FUNCTION_EXIT();
    return FALSE;
  }
  //FUNCTION_EXIT();
  return TRUE;
}
/*
 * Parse the Ethernet/IPv4/TCP(or UDP) headers of the packet described by
 * pi, filling in the pi->ip_* / pi->tcp_* fields and setting
 * pi->parse_result to PARSE_OK, PARSE_TOO_SMALL or PARSE_UNKNOWN_TYPE.
 *
 * alt_buffer:      if non-NULL, use this as the header staging buffer
 *                  instead of the first MDL's virtual address.
 * min_header_size: initial amount of header to pull up front (lookahead);
 *                  grown further as each protocol layer is identified.
 */
VOID
XenNet_ParsePacketHeader(packet_info_t *pi, PUCHAR alt_buffer, ULONG min_header_size)
{
  //FUNCTION_ENTER();

  XN_ASSERT(pi->first_mdl);

#if NTDDI_VERSION < NTDDI_VISTA
  NdisQueryBufferSafe(pi->first_mdl, (PVOID)&pi->first_mdl_virtual, &pi->first_mdl_length, NormalPagePriority);
#else
  NdisQueryMdl(pi->first_mdl, (PVOID)&pi->first_mdl_virtual, &pi->first_mdl_length, NormalPagePriority);
#endif
  pi->curr_mdl = pi->first_mdl;
  if (alt_buffer)
    pi->header = alt_buffer;
  else
    pi->header = pi->first_mdl_virtual;

  /* reset all derived fields before parsing */
  pi->header_length = 0;
  pi->curr_mdl_offset = pi->first_mdl_offset;

  pi->ip_proto = 0;
  pi->ip_version = 0;
  pi->ip4_header_length = 0;
  pi->ip4_length = 0;
  pi->tcp_header_length = 0;
  pi->tcp_length = 0;
  pi->split_required = 0;

  /* best-effort pull of the requested lookahead; return value deliberately
   * ignored - the checked XN_HDR_SIZE pull below establishes the real
   * minimum (NOTE(review): presumably intentional, confirm against callers) */
  XenNet_BuildHeader(pi, NULL, min_header_size);

  if (!XenNet_BuildHeader(pi, NULL, (ULONG)XN_HDR_SIZE)) {
    //KdPrint((__DRIVER_NAME "     packet too small (Ethernet Header)\n"));
    pi->parse_result = PARSE_TOO_SMALL;
    return;
  }

  /* classify destination MAC */
  if (pi->header[0] == 0xFF && pi->header[1] == 0xFF
      && pi->header[2] == 0xFF && pi->header[3] == 0xFF
      && pi->header[4] == 0xFF && pi->header[5] == 0xFF) {
    pi->is_broadcast = TRUE;
  } else if (pi->header[0] & 0x01) {
    pi->is_multicast = TRUE;
  }

  switch (GET_NET_PUSHORT(&pi->header[12])) { // L2 protocol field
  case 0x0800: /* IPv4 */
    //KdPrint((__DRIVER_NAME "     IP\n"));
    /* make sure the fixed 20-byte IPv4 header is contiguous */
    if (pi->header_length < (ULONG)(XN_HDR_SIZE + 20)) {
      if (!XenNet_BuildHeader(pi, NULL, (ULONG)(XN_HDR_SIZE + 20))) {
        KdPrint((__DRIVER_NAME "     packet too small (IP Header)\n"));
        pi->parse_result = PARSE_TOO_SMALL;
        return;
      }
    }
    pi->ip_version = (pi->header[XN_HDR_SIZE + 0] & 0xF0) >> 4;
    if (pi->ip_version != 4) {
      //KdPrint((__DRIVER_NAME "     ip_version = %d\n", pi->ip_version));
      pi->parse_result = PARSE_UNKNOWN_TYPE;
      return;
    }
    /* IHL is in 32-bit words, hence << 2 */
    pi->ip4_header_length = (pi->header[XN_HDR_SIZE + 0] & 0x0F) << 2;
    /* pull IP options plus enough for a minimal TCP header */
    if (pi->header_length < (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + 20)) {
      if (!XenNet_BuildHeader(pi, NULL, (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + 20))) {
        //KdPrint((__DRIVER_NAME "     packet too small (IP Header + IP Options + TCP Header)\n"));
        pi->parse_result = PARSE_TOO_SMALL;
        return;
      }
    }
    break;
  case 0x86DD: /* IPv6 */
    //KdPrint((__DRIVER_NAME "     IPv6\n"));
    //KdPrint((__DRIVER_NAME "     (not currently used)\n"));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  default:
    //KdPrint((__DRIVER_NAME "     Not IP (%04x)\n", GET_NET_PUSHORT(&pi->header[12])));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  }
  pi->ip_proto = pi->header[XN_HDR_SIZE + 9];
  pi->ip4_length = GET_NET_PUSHORT(&pi->header[XN_HDR_SIZE + 2]);
  pi->ip_has_options = (BOOLEAN)(pi->ip4_header_length > 20);
  switch (pi->ip_proto) {
  case 6:  // TCP
  case 17: // UDP
    break;
  default:
    //KdPrint((__DRIVER_NAME "     Not TCP/UDP (%d)\n", pi->ip_proto));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  }
  /* TCP data offset is in 32-bit words in the high nibble, hence >> 2 */
  pi->tcp_header_length = (pi->header[XN_HDR_SIZE + pi->ip4_header_length + 12] & 0xf0) >> 2;

  if (pi->header_length < (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + pi->tcp_header_length)) {
    /* we don't actually need the tcp options to analyse the header */
    if (!XenNet_BuildHeader(pi, NULL, (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + MIN_TCP_HEADER_LENGTH))) {
      //KdPrint((__DRIVER_NAME "     packet too small (IP Header + IP Options + TCP Header (not including TCP Options))\n"));
      pi->parse_result = PARSE_TOO_SMALL;
      return;
    }
  }

  /* sanity: IP total length must fit inside the received frame */
  if ((ULONG)XN_HDR_SIZE + pi->ip4_length > pi->total_length) {
    //KdPrint((__DRIVER_NAME "     XN_HDR_SIZE + ip4_length (%d) > total_length (%d)\n", XN_HDR_SIZE + pi->ip4_length, pi->total_length));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  }

  pi->tcp_length = pi->ip4_length - pi->ip4_header_length - pi->tcp_header_length;
  pi->tcp_remaining = pi->tcp_length;
  pi->tcp_seq = GET_NET_PULONG(&pi->header[XN_HDR_SIZE + pi->ip4_header_length + 4]);
  pi->tcp_has_options = (BOOLEAN)(pi->tcp_header_length > 20);
  /* payload larger than MSS means large-send split is needed on tx */
  if (pi->mss > 0 && pi->tcp_length > pi->mss)
    pi->split_required = TRUE;

  //KdPrint((__DRIVER_NAME "     ip4_length = %d\n", pi->ip4_length));
  //KdPrint((__DRIVER_NAME "     tcp_length = %d\n", pi->tcp_length));
  //FUNCTION_EXIT();

  pi->parse_result = PARSE_OK;
}
237 BOOLEAN
238 XenNet_CheckIpHeaderSum(PUCHAR header, USHORT ip4_header_length) {
239 ULONG csum = 0;
240 USHORT i;
242 XN_ASSERT(ip4_header_length > 12);
243 XN_ASSERT(!(ip4_header_length & 1));
245 for (i = 0; i < ip4_header_length; i += 2) {
246 csum += GET_NET_PUSHORT(&header[XN_HDR_SIZE + i]);
247 }
248 while (csum & 0xFFFF0000)
249 csum = (csum & 0xFFFF) + (csum >> 16);
250 return (BOOLEAN)(csum == 0xFFFF);
251 }
253 VOID
254 XenNet_SumIpHeader(PUCHAR header, USHORT ip4_header_length) {
255 ULONG csum = 0;
256 USHORT i;
258 XN_ASSERT(ip4_header_length > 12);
259 XN_ASSERT(!(ip4_header_length & 1));
261 header[XN_HDR_SIZE + 10] = 0;
262 header[XN_HDR_SIZE + 11] = 0;
263 for (i = 0; i < ip4_header_length; i += 2) {
264 csum += GET_NET_PUSHORT(&header[XN_HDR_SIZE + i]);
265 }
266 while (csum & 0xFFFF0000)
267 csum = (csum & 0xFFFF) + (csum >> 16);
268 csum = ~csum;
269 SET_NET_USHORT(&header[XN_HDR_SIZE + 10], (USHORT)csum);
270 }
272 BOOLEAN
273 XenNet_FilterAcceptPacket(struct xennet_info *xi,packet_info_t *pi)
274 {
275 ULONG i;
276 BOOLEAN is_my_multicast = FALSE;
277 BOOLEAN is_directed = FALSE;
279 if (memcmp(xi->curr_mac_addr, pi->header, ETH_ALEN) == 0)
280 {
281 is_directed = TRUE;
282 }
283 else if (pi->is_multicast)
284 {
285 for (i = 0; i < xi->multicast_list_size; i++)
286 {
287 if (memcmp(xi->multicast_list[i], pi->header, 6) == 0)
288 break;
289 }
290 if (i < xi->multicast_list_size)
291 {
292 is_my_multicast = TRUE;
293 }
294 }
295 if (is_directed && (xi->packet_filter & NDIS_PACKET_TYPE_DIRECTED))
296 {
297 return TRUE;
298 }
299 if (is_my_multicast && (xi->packet_filter & NDIS_PACKET_TYPE_MULTICAST))
300 {
301 return TRUE;
302 }
303 if (pi->is_multicast && (xi->packet_filter & NDIS_PACKET_TYPE_ALL_MULTICAST))
304 {
305 return TRUE;
306 }
307 if (pi->is_broadcast && (xi->packet_filter & NDIS_PACKET_TYPE_BROADCAST))
308 {
309 return TRUE;
310 }
311 if (xi->packet_filter & NDIS_PACKET_TYPE_PROMISCUOUS)
312 {
313 return TRUE;
314 }
315 //return TRUE;
316 return FALSE;
317 }
319 static VOID
320 XenNet_RxTxDpc(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2)
321 {
322 struct xennet_info *xi = context;
323 BOOLEAN dont_set_event;
325 UNREFERENCED_PARAMETER(dpc);
326 UNREFERENCED_PARAMETER(arg1);
327 UNREFERENCED_PARAMETER(arg2);
329 //FUNCTION_ENTER();
330 /* if Rx goes over its per-dpc quota then make sure TxBufferGC doesn't set an event as we are already guaranteed to be called again */
331 dont_set_event = XenNet_RxBufferCheck(xi);
332 XenNet_TxBufferGC(xi, dont_set_event);
333 //FUNCTION_EXIT();
334 }
336 static BOOLEAN
337 XenNet_HandleEvent_DIRQL(PVOID context)
338 {
339 struct xennet_info *xi = context;
340 //ULONG suspend_resume_state_pdo;
342 //FUNCTION_ENTER();
343 if (xi->device_state == DEVICE_STATE_ACTIVE || xi->device_state == DEVICE_STATE_DISCONNECTING) {
344 KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
345 }
346 //FUNCTION_EXIT();
347 return TRUE;
348 }
350 NTSTATUS
351 XenNet_Connect(PVOID context, BOOLEAN suspend) {
352 NTSTATUS status;
353 struct xennet_info *xi = context;
354 PFN_NUMBER pfn;
355 ULONG qemu_hide_filter;
356 ULONG qemu_hide_flags_value;
357 int i;
358 ULONG state;
359 ULONG octet;
360 PCHAR tmp_string;
361 ULONG tmp_ulong;
363 if (!suspend) {
364 xi->handle = XnOpenDevice(xi->pdo, XenNet_DeviceCallback, xi);
365 }
366 if (!xi->handle) {
367 FUNCTION_MSG("Cannot open Xen device\n");
368 return STATUS_UNSUCCESSFUL;
369 }
370 XnGetValue(xi->handle, XN_VALUE_TYPE_QEMU_HIDE_FLAGS, &qemu_hide_flags_value);
371 XnGetValue(xi->handle, XN_VALUE_TYPE_QEMU_FILTER, &qemu_hide_filter);
372 if (!(qemu_hide_flags_value & QEMU_UNPLUG_ALL_NICS) || qemu_hide_filter) {
373 FUNCTION_MSG("inactive\n");
374 xi->device_state = DEVICE_STATE_INACTIVE;
375 /* continue with setup so all the flags and capabilities are correct */
376 }
378 if (xi->device_state != DEVICE_STATE_INACTIVE) {
379 for (i = 0; i <= 5 && xi->backend_state != XenbusStateInitialising && xi->backend_state != XenbusStateInitWait && xi->backend_state != XenbusStateInitialised; i++) {
380 FUNCTION_MSG("Waiting for XenbusStateInitXxx\n");
381 if (xi->backend_state == XenbusStateClosed) {
382 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateInitialising);
383 }
384 KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, NULL);
385 }
386 if (xi->backend_state != XenbusStateInitialising && xi->backend_state != XenbusStateInitWait && xi->backend_state != XenbusStateInitialised) {
387 FUNCTION_MSG("Backend state timeout\n");
388 return STATUS_UNSUCCESSFUL;
389 }
390 if (!NT_SUCCESS(status = XnBindEvent(xi->handle, &xi->event_channel, XenNet_HandleEvent_DIRQL, xi))) {
391 FUNCTION_MSG("Cannot allocate event channel\n");
392 return STATUS_UNSUCCESSFUL;
393 }
394 FUNCTION_MSG("event_channel = %d\n", xi->event_channel);
395 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "event-channel", xi->event_channel);
396 xi->tx_sring = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG);
397 if (!xi->tx_sring) {
398 FUNCTION_MSG("Cannot allocate tx_sring\n");
399 return STATUS_UNSUCCESSFUL;
400 }
401 SHARED_RING_INIT(xi->tx_sring);
402 FRONT_RING_INIT(&xi->tx_ring, xi->tx_sring, PAGE_SIZE);
403 pfn = (PFN_NUMBER)(MmGetPhysicalAddress(xi->tx_sring).QuadPart >> PAGE_SHIFT);
404 FUNCTION_MSG("tx sring pfn = %d\n", (ULONG)pfn);
405 xi->tx_sring_gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, INVALID_GRANT_REF, XENNET_POOL_TAG);
406 FUNCTION_MSG("tx sring_gref = %d\n", xi->tx_sring_gref);
407 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "tx-ring-ref", xi->tx_sring_gref);
408 xi->rx_sring = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG);
409 if (!xi->rx_sring) {
410 FUNCTION_MSG("Cannot allocate rx_sring\n");
411 return STATUS_UNSUCCESSFUL;
412 }
413 SHARED_RING_INIT(xi->rx_sring);
414 FRONT_RING_INIT(&xi->rx_ring, xi->rx_sring, PAGE_SIZE);
415 pfn = (PFN_NUMBER)(MmGetPhysicalAddress(xi->rx_sring).QuadPart >> PAGE_SHIFT);
416 FUNCTION_MSG("rx sring pfn = %d\n", (ULONG)pfn);
417 xi->rx_sring_gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, INVALID_GRANT_REF, XENNET_POOL_TAG);
418 FUNCTION_MSG("rx sring_gref = %d\n", xi->rx_sring_gref);
419 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "rx-ring-ref", xi->rx_sring_gref);
421 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "request-rx-copy", 1);
422 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "request-rx-notify", 1);
423 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "feature-no-csum-offload", !xi->frontend_csum_supported);
424 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "feature-sg", (int)xi->frontend_sg_supported);
425 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "feature-gso-tcpv4", !!xi->frontend_gso_value);
426 }
427 status = XnReadInt32(xi->handle, XN_BASE_BACKEND, "feature-sg", &tmp_ulong);
428 if (tmp_ulong) {
429 xi->backend_sg_supported = TRUE;
430 }
431 status = XnReadInt32(xi->handle, XN_BASE_BACKEND, "feature-gso-tcpv4", &tmp_ulong);
432 if (tmp_ulong) {
433 xi->backend_gso_value = xi->frontend_gso_value;
434 }
436 status = XnReadString(xi->handle, XN_BASE_BACKEND, "mac", &tmp_string);
437 state = 0;
438 octet = 0;
439 for (i = 0; state != 3 && i < (int)strlen(tmp_string); i++) {
440 if (octet == 6) {
441 state = 3;
442 break;
443 }
444 switch(state) {
445 case 0:
446 case 1:
447 if (tmp_string[i] >= '0' && tmp_string[i] <= '9') {
448 xi->perm_mac_addr[octet] |= (tmp_string[i] - '0') << ((1 - state) * 4);
449 state++;
450 } else if (tmp_string[i] >= 'A' && tmp_string[i] <= 'F') {
451 xi->perm_mac_addr[octet] |= (tmp_string[i] - 'A' + 10) << ((1 - state) * 4);
452 state++;
453 } else if (tmp_string[i] >= 'a' && tmp_string[i] <= 'f') {
454 xi->perm_mac_addr[octet] |= (tmp_string[i] - 'a' + 10) << ((1 - state) * 4);
455 state++;
456 } else {
457 state = 3;
458 }
459 break;
460 case 2:
461 if (tmp_string[i] == ':') {
462 octet++;
463 state = 0;
464 } else {
465 state = 3;
466 }
467 break;
468 }
469 }
470 if (octet != 5 || state != 2) {
471 FUNCTION_MSG("Failed to parse backend MAC address %s\n", tmp_string);
472 XnFreeMem(xi->handle, tmp_string);
473 return STATUS_UNSUCCESSFUL;
474 } else if ((xi->curr_mac_addr[0] & 0x03) != 0x02) {
475 /* only copy if curr_mac_addr is not a LUA */
476 memcpy(xi->curr_mac_addr, xi->perm_mac_addr, ETH_ALEN);
477 }
478 XnFreeMem(xi->handle, tmp_string);
479 FUNCTION_MSG("MAC address is %02X:%02X:%02X:%02X:%02X:%02X\n",
480 xi->curr_mac_addr[0], xi->curr_mac_addr[1], xi->curr_mac_addr[2],
481 xi->curr_mac_addr[3], xi->curr_mac_addr[4], xi->curr_mac_addr[5]);
483 if (xi->device_state != DEVICE_STATE_INACTIVE) {
484 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateConnected);
486 for (i = 0; i <= 5 && xi->backend_state != XenbusStateConnected; i++) {
487 FUNCTION_MSG("Waiting for XenbusStateConnected\n");
488 KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, NULL);
489 }
490 if (xi->backend_state != XenbusStateConnected) {
491 FUNCTION_MSG("Backend state timeout\n");
492 return STATUS_UNSUCCESSFUL;
493 }
494 XenNet_TxInit(xi);
495 XenNet_RxInit(xi);
496 }
498 /* we don't set device_state = DEVICE_STATE_ACTIVE here - has to be done during init once ndis is ready */
500 return STATUS_SUCCESS;
501 }
/*
 * Disconnect the frontend from the backend: walk the xenbus state down
 * through Closing to Closed, unbind the event channel, tear down tx/rx
 * state and release the shared ring grants and memory. When suspend is
 * FALSE the Xen device handle is closed as well.
 *
 * Returns STATUS_SUCCESS (also when called in a state where there is
 * nothing to do).
 */
NTSTATUS
XenNet_Disconnect(PVOID context, BOOLEAN suspend) {
  struct xennet_info *xi = (struct xennet_info *)context;
  //PFN_NUMBER pfn;
  NTSTATUS status;

  if (xi->device_state != DEVICE_STATE_ACTIVE && xi->device_state != DEVICE_STATE_INACTIVE) {
    FUNCTION_MSG("state not DEVICE_STATE_(IN)ACTIVE, is %d instead\n", xi->device_state);
    FUNCTION_EXIT();
    return STATUS_SUCCESS;
  }
  if (xi->device_state != DEVICE_STATE_INACTIVE) {
    xi->device_state = DEVICE_STATE_DISCONNECTING;
    /* step the frontend to Closing and wait for the backend to follow */
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateClosing);
    while (xi->backend_state != XenbusStateClosing && xi->backend_state != XenbusStateClosed) {
      FUNCTION_MSG("Waiting for XenbusStateClosing/Closed\n");
      KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, NULL);
    }
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateClosed);
    while (xi->backend_state != XenbusStateClosed) {
      FUNCTION_MSG("Waiting for XenbusStateClosed\n");
      KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, NULL);
    }
    XnUnbindEvent(xi->handle, xi->event_channel);

  /* NOTE(review): WINXP is not an NTDDI_* constant - if it is undefined this
   * condition is never true and KeFlushQueuedDpcs() is never called; was
   * NTDDI_WINXP (or a >= comparison) intended? Confirm against the WDK. */
  #if NTDDI_VERSION < WINXP
    KeFlushQueuedDpcs();
  #endif
    XenNet_TxShutdown(xi);
    XenNet_RxShutdown(xi);
    /* revoke grants then free the ring pages */
    XnEndAccess(xi->handle, xi->rx_sring_gref, FALSE, XENNET_POOL_TAG);
    ExFreePoolWithTag(xi->rx_sring, XENNET_POOL_TAG);
    XnEndAccess(xi->handle, xi->tx_sring_gref, FALSE, XENNET_POOL_TAG);
    ExFreePoolWithTag(xi->tx_sring, XENNET_POOL_TAG);
  }
  if (!suspend) {
    XnCloseDevice(xi->handle);
  }
  xi->device_state = DEVICE_STATE_DISCONNECTED;
  return STATUS_SUCCESS;
}
545 VOID
546 XenNet_DeviceCallback(PVOID context, ULONG callback_type, PVOID value) {
547 struct xennet_info *xi = (struct xennet_info *)context;
548 ULONG state;
550 FUNCTION_ENTER();
551 switch (callback_type) {
552 case XN_DEVICE_CALLBACK_BACKEND_STATE:
553 state = (ULONG)(ULONG_PTR)value;
554 if (state == xi->backend_state) {
555 FUNCTION_MSG("same state %d\n", state);
556 FUNCTION_EXIT();
557 }
558 FUNCTION_MSG("XenBusState = %d -> %d\n", xi->backend_state, state);
559 xi->backend_state = state;
560 KeSetEvent(&xi->backend_event, 0, FALSE);
561 break;
562 case XN_DEVICE_CALLBACK_SUSPEND:
563 FUNCTION_MSG("XN_DEVICE_CALLBACK_SUSPEND");
564 XenNet_Disconnect(xi, TRUE);
565 break;
566 case XN_DEVICE_CALLBACK_RESUME:
567 FUNCTION_MSG("XN_DEVICE_CALLBACK_RESUME");
568 xi->device_state = DEVICE_STATE_INITIALISING;
569 XenNet_Connect(xi, TRUE);
570 if (xi->device_state != DEVICE_STATE_INACTIVE) {
571 xi->device_state = DEVICE_STATE_ACTIVE;
572 }
573 KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
574 break;
575 }
576 FUNCTION_EXIT();
577 }