win-pvdrivers

view xennet/xennet_common.c @ 1085:896402519f15

check for errors in xennet when getting backend feature support. Fix a stop/start problem in xennet.
author James Harper <james.harper@bendigoit.com.au>
date Thu Dec 12 19:55:52 2013 +1100 (2013-12-12)
parents a60d401aa020
children 27bd2a5a4704
line source
1 /*
2 PV Net Driver for Windows Xen HVM Domains
3 Copyright (C) 2007 James Harper
4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
21 #include "xennet.h"
23 /* Increase the header to a certain size */
/* Grow the contiguous header copy for the packet described by pi to at least
 * new_header_size bytes, pulling data out of the packet's MDL chain as needed.
 * header: destination buffer; NULL means use pi->header (chosen at parse time).
 * Returns TRUE on success (header_length >= min(new_header_size, total_length)),
 * FALSE if an MDL could not be mapped or the chain ran out of data early.
 * Side effects: advances pi->curr_mdl / pi->curr_mdl_offset / pi->curr_pb and
 * updates pi->header_length. */
BOOLEAN
XenNet_BuildHeader(packet_info_t *pi, PUCHAR header, ULONG new_header_size)
{
  ULONG bytes_remaining;

  //FUNCTION_ENTER();

  if (!header)
    header = pi->header; /* default to the buffer selected by ParsePacketHeader */

  /* cannot build a header larger than the whole packet */
  if (new_header_size > pi->total_length) {
    new_header_size = pi->total_length;
  }

  if (new_header_size <= pi->header_length) {
    //FUNCTION_EXIT();
    return TRUE; /* header is already at least the required size */
  }

  if (header == pi->first_mdl_virtual) {
    XN_ASSERT(new_header_size <= PAGE_SIZE);
    /* still working in the first buffer */
    if (new_header_size <= pi->first_mdl_length) {
      /* Trivially expand header_length */
      pi->header_length = new_header_size;
      if (pi->header_length == pi->first_mdl_length) {
        /* first MDL fully consumed - step to the next MDL (and shared buffer) */
#if NTDDI_VERSION < NTDDI_VISTA
        NdisGetNextBuffer(pi->curr_mdl, &pi->curr_mdl);
#else
        NdisGetNextMdl(pi->curr_mdl, &pi->curr_mdl);
#endif
        pi->curr_mdl_offset = 0;
        if (pi->curr_pb)
          pi->curr_pb = pi->curr_pb->next;
      } else {
        pi->curr_mdl_offset = (USHORT)new_header_size;
      }
    }
  }

  bytes_remaining = new_header_size - pi->header_length;

  /* copy the remainder of the requested header out of the MDL chain */
  while (bytes_remaining && pi->curr_mdl) {
    ULONG copy_size;

    XN_ASSERT(pi->curr_mdl);
    if (MmGetMdlByteCount(pi->curr_mdl)) {
      PUCHAR src_addr;
      src_addr = MmGetSystemAddressForMdlSafe(pi->curr_mdl, NormalPagePriority);
      if (!src_addr) {
        //FUNCTION_EXIT();
        return FALSE; /* low resources - could not map the MDL */
      }
      copy_size = min(bytes_remaining, MmGetMdlByteCount(pi->curr_mdl) - pi->curr_mdl_offset);
      memcpy(header + pi->header_length,
        src_addr + pi->curr_mdl_offset, copy_size);
      pi->curr_mdl_offset = (USHORT)(pi->curr_mdl_offset + copy_size);
      pi->header_length += copy_size;
      bytes_remaining -= copy_size;
    }
    if (pi->curr_mdl_offset == MmGetMdlByteCount(pi->curr_mdl)) {
      /* current MDL exhausted (also handles zero-length MDLs) - advance */
#if NTDDI_VERSION < NTDDI_VISTA
      NdisGetNextBuffer(pi->curr_mdl, &pi->curr_mdl);
#else
      NdisGetNextMdl(pi->curr_mdl, &pi->curr_mdl);
#endif
      if (pi->curr_pb)
        pi->curr_pb = pi->curr_pb->next;
      pi->curr_mdl_offset = 0;
    }
  }
  //KdPrint((__DRIVER_NAME "     C bytes_remaining = %d, pi->curr_mdl = %p\n", bytes_remaining, pi->curr_mdl));
  if (bytes_remaining) {
    /* ran out of MDLs before satisfying the request */
    //KdPrint((__DRIVER_NAME "     bytes_remaining\n"));
    //FUNCTION_EXIT();
    return FALSE;
  }
  //FUNCTION_EXIT();
  return TRUE;
}
/* Parse the Ethernet / IPv4 / TCP(or UDP) headers of the packet described by
 * pi and populate pi->ip_*, pi->tcp_* and pi->parse_result.
 * alt_buffer: if non-NULL, used as the contiguous header staging buffer
 *   instead of the first MDL's virtual address.
 * min_header_size: amount of header to pre-build up front (best effort).
 * On return pi->parse_result is PARSE_OK, PARSE_TOO_SMALL (truncated packet)
 * or PARSE_UNKNOWN_TYPE (non-IPv4 / non-TCP/UDP / bad length). */
VOID
XenNet_ParsePacketHeader(packet_info_t *pi, PUCHAR alt_buffer, ULONG min_header_size)
{
  //FUNCTION_ENTER();

  XN_ASSERT(pi->first_mdl);

#if NTDDI_VERSION < NTDDI_VISTA
  NdisQueryBufferSafe(pi->first_mdl, (PVOID)&pi->first_mdl_virtual, &pi->first_mdl_length, NormalPagePriority);
#else
  NdisQueryMdl(pi->first_mdl, (PVOID)&pi->first_mdl_virtual, &pi->first_mdl_length, NormalPagePriority);
#endif
  pi->curr_mdl = pi->first_mdl;
  if (alt_buffer)
    pi->header = alt_buffer;
  else
    pi->header = pi->first_mdl_virtual;

  pi->header_length = 0;
  pi->curr_mdl_offset = pi->first_mdl_offset;

  /* reset all parse outputs before starting */
  pi->ip_proto = 0;
  pi->ip_version = 0;
  pi->ip4_header_length = 0;
  pi->ip4_length = 0;
  pi->tcp_header_length = 0;
  pi->tcp_length = 0;
  pi->split_required = 0;

  /* best-effort pre-build of the caller's requested size; the return value is
     deliberately ignored here because each stage below rebuilds (and checks)
     exactly what it needs */
  XenNet_BuildHeader(pi, NULL, min_header_size);

  if (!XenNet_BuildHeader(pi, NULL, (ULONG)XN_HDR_SIZE)) {
    //KdPrint((__DRIVER_NAME "     packet too small (Ethernet Header)\n"));
    pi->parse_result = PARSE_TOO_SMALL;
    return;
  }

  /* destination MAC: all-FF is broadcast, low bit of first octet is multicast */
  if (pi->header[0] == 0xFF && pi->header[1] == 0xFF
      && pi->header[2] == 0xFF && pi->header[3] == 0xFF
      && pi->header[4] == 0xFF && pi->header[5] == 0xFF) {
    pi->is_broadcast = TRUE;
  } else if (pi->header[0] & 0x01) {
    pi->is_multicast = TRUE;
  }

  switch (GET_NET_PUSHORT(&pi->header[12])) { // L2 protocol field
  case 0x0800: /* IPv4 */
    //KdPrint((__DRIVER_NAME "     IP\n"));
    /* need at least the 20-byte fixed IPv4 header */
    if (pi->header_length < (ULONG)(XN_HDR_SIZE + 20)) {
      if (!XenNet_BuildHeader(pi, NULL, (ULONG)(XN_HDR_SIZE + 20))) {
        FUNCTION_MSG("packet too small (IP Header)\n");
        pi->parse_result = PARSE_TOO_SMALL;
        return;
      }
    }
    pi->ip_version = (pi->header[XN_HDR_SIZE + 0] & 0xF0) >> 4;
    if (pi->ip_version != 4) {
      //KdPrint((__DRIVER_NAME "     ip_version = %d\n", pi->ip_version));
      pi->parse_result = PARSE_UNKNOWN_TYPE;
      return;
    }
    /* IHL is in 32-bit words; << 2 converts to bytes */
    pi->ip4_header_length = (pi->header[XN_HDR_SIZE + 0] & 0x0F) << 2;
    /* extend to cover IP options plus a minimal 20-byte TCP header */
    if (pi->header_length < (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + 20)) {
      if (!XenNet_BuildHeader(pi, NULL, (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + 20))) {
        //KdPrint((__DRIVER_NAME "     packet too small (IP Header + IP Options + TCP Header)\n"));
        pi->parse_result = PARSE_TOO_SMALL;
        return;
      }
    }
    break;
  case 0x86DD: /* IPv6 */
    //KdPrint((__DRIVER_NAME "     IPv6\n"));
    //KdPrint((__DRIVER_NAME "     (not currently used)\n"));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  default:
    //KdPrint((__DRIVER_NAME "     Not IP (%04x)\n", GET_NET_PUSHORT(&pi->header[12])));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  }
  pi->ip_proto = pi->header[XN_HDR_SIZE + 9];       /* IPv4 protocol field */
  pi->ip4_length = GET_NET_PUSHORT(&pi->header[XN_HDR_SIZE + 2]); /* total length */
  pi->ip_has_options = (BOOLEAN)(pi->ip4_header_length > 20);
  switch (pi->ip_proto) {
  case 6: // TCP
  case 17: // UDP
    break;
  default:
    //KdPrint((__DRIVER_NAME "     Not TCP/UDP (%d)\n", pi->ip_proto));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  }
  /* TCP data offset is in 32-bit words in the high nibble; >> 2 gives bytes.
     NOTE(review): this and the tcp_* fields below are also computed for UDP
     (ip_proto 17), where offset +12 lies inside the UDP payload - appears to
     be long-standing upstream behaviour, but confirm consumers only use
     tcp_* for TCP packets. */
  pi->tcp_header_length = (pi->header[XN_HDR_SIZE + pi->ip4_header_length + 12] & 0xf0) >> 2;

  if (pi->header_length < (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + pi->tcp_header_length)) {
    /* we don't actually need the tcp options to analyse the header */
    if (!XenNet_BuildHeader(pi, NULL, (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + MIN_TCP_HEADER_LENGTH))) {
      //KdPrint((__DRIVER_NAME "     packet too small (IP Header + IP Options + TCP Header (not including TCP Options))\n"));
      pi->parse_result = PARSE_TOO_SMALL;
      return;
    }
  }

  /* IP total length must fit inside the actual packet */
  if ((ULONG)XN_HDR_SIZE + pi->ip4_length > pi->total_length) {
    //KdPrint((__DRIVER_NAME "     XN_HDR_SIZE + ip4_length (%d) > total_length (%d)\n", XN_HDR_SIZE + pi->ip4_length, pi->total_length));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  }

  pi->tcp_length = pi->ip4_length - pi->ip4_header_length - pi->tcp_header_length;
  pi->tcp_remaining = pi->tcp_length;
  pi->tcp_seq = GET_NET_PULONG(&pi->header[XN_HDR_SIZE + pi->ip4_header_length + 4]);
  pi->tcp_has_options = (BOOLEAN)(pi->tcp_header_length > 20);
  /* payload larger than the MSS needs software GSO on the way out */
  if (pi->mss > 0 && pi->tcp_length > pi->mss)
    pi->split_required = TRUE;

  //KdPrint((__DRIVER_NAME "     ip4_length = %d\n", pi->ip4_length));
  //KdPrint((__DRIVER_NAME "     tcp_length = %d\n", pi->tcp_length));
  //FUNCTION_EXIT();

  pi->parse_result = PARSE_OK;
}
228 BOOLEAN
229 XenNet_CheckIpHeaderSum(PUCHAR header, USHORT ip4_header_length) {
230 ULONG csum = 0;
231 USHORT i;
233 XN_ASSERT(ip4_header_length > 12);
234 XN_ASSERT(!(ip4_header_length & 1));
236 for (i = 0; i < ip4_header_length; i += 2) {
237 csum += GET_NET_PUSHORT(&header[XN_HDR_SIZE + i]);
238 }
239 while (csum & 0xFFFF0000)
240 csum = (csum & 0xFFFF) + (csum >> 16);
241 return (BOOLEAN)(csum == 0xFFFF);
242 }
244 VOID
245 XenNet_SumIpHeader(PUCHAR header, USHORT ip4_header_length) {
246 ULONG csum = 0;
247 USHORT i;
249 XN_ASSERT(ip4_header_length > 12);
250 XN_ASSERT(!(ip4_header_length & 1));
252 header[XN_HDR_SIZE + 10] = 0;
253 header[XN_HDR_SIZE + 11] = 0;
254 for (i = 0; i < ip4_header_length; i += 2) {
255 csum += GET_NET_PUSHORT(&header[XN_HDR_SIZE + i]);
256 }
257 while (csum & 0xFFFF0000)
258 csum = (csum & 0xFFFF) + (csum >> 16);
259 csum = ~csum;
260 SET_NET_USHORT(&header[XN_HDR_SIZE + 10], (USHORT)csum);
261 }
263 BOOLEAN
264 XenNet_FilterAcceptPacket(struct xennet_info *xi,packet_info_t *pi)
265 {
266 ULONG i;
267 BOOLEAN is_my_multicast = FALSE;
268 BOOLEAN is_directed = FALSE;
270 if (memcmp(xi->curr_mac_addr, pi->header, ETH_ALEN) == 0)
271 {
272 is_directed = TRUE;
273 }
274 else if (pi->is_multicast)
275 {
276 for (i = 0; i < xi->multicast_list_size; i++)
277 {
278 if (memcmp(xi->multicast_list[i], pi->header, 6) == 0)
279 break;
280 }
281 if (i < xi->multicast_list_size)
282 {
283 is_my_multicast = TRUE;
284 }
285 }
286 if (is_directed && (xi->packet_filter & NDIS_PACKET_TYPE_DIRECTED))
287 {
288 return TRUE;
289 }
290 if (is_my_multicast && (xi->packet_filter & NDIS_PACKET_TYPE_MULTICAST))
291 {
292 return TRUE;
293 }
294 if (pi->is_multicast && (xi->packet_filter & NDIS_PACKET_TYPE_ALL_MULTICAST))
295 {
296 return TRUE;
297 }
298 if (pi->is_broadcast && (xi->packet_filter & NDIS_PACKET_TYPE_BROADCAST))
299 {
300 return TRUE;
301 }
302 if (xi->packet_filter & NDIS_PACKET_TYPE_PROMISCUOUS)
303 {
304 return TRUE;
305 }
306 //return TRUE;
307 return FALSE;
308 }
310 static VOID
311 XenNet_RxTxDpc(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2)
312 {
313 struct xennet_info *xi = context;
314 BOOLEAN dont_set_event;
316 UNREFERENCED_PARAMETER(dpc);
317 UNREFERENCED_PARAMETER(arg1);
318 UNREFERENCED_PARAMETER(arg2);
320 //FUNCTION_ENTER();
321 /* if Rx goes over its per-dpc quota then make sure TxBufferGC doesn't set an event as we are already guaranteed to be called again */
322 dont_set_event = XenNet_RxBufferCheck(xi);
323 XenNet_TxBufferGC(xi, dont_set_event);
324 //FUNCTION_EXIT();
325 }
327 static BOOLEAN
328 XenNet_HandleEvent_DIRQL(PVOID context)
329 {
330 struct xennet_info *xi = context;
331 //ULONG suspend_resume_state_pdo;
333 //FUNCTION_ENTER();
334 if (xi->device_state == DEVICE_STATE_ACTIVE || xi->device_state == DEVICE_STATE_DISCONNECTING) {
335 KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
336 }
337 //FUNCTION_EXIT();
338 return TRUE;
339 }
341 NTSTATUS
342 XenNet_Connect(PVOID context, BOOLEAN suspend) {
343 NTSTATUS status;
344 struct xennet_info *xi = context;
345 PFN_NUMBER pfn;
346 ULONG qemu_hide_filter;
347 ULONG qemu_hide_flags_value;
348 int i;
349 ULONG state;
350 ULONG octet;
351 PCHAR tmp_string;
352 ULONG tmp_ulong;
353 LARGE_INTEGER timeout;
355 if (!suspend) {
356 xi->handle = XnOpenDevice(xi->pdo, XenNet_DeviceCallback, xi);
357 }
358 if (!xi->handle) {
359 FUNCTION_MSG("Cannot open Xen device\n");
360 return STATUS_UNSUCCESSFUL;
361 }
362 XnGetValue(xi->handle, XN_VALUE_TYPE_QEMU_HIDE_FLAGS, &qemu_hide_flags_value);
363 XnGetValue(xi->handle, XN_VALUE_TYPE_QEMU_FILTER, &qemu_hide_filter);
364 if (!(qemu_hide_flags_value & QEMU_UNPLUG_ALL_NICS) || qemu_hide_filter) {
365 FUNCTION_MSG("inactive\n");
366 xi->device_state = DEVICE_STATE_INACTIVE;
367 /* continue with setup so all the flags and capabilities are correct */
368 }
369 /* explicitly set the frontend state as it will still be 'closed' if we are restarting the adapter */
370 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateInitialising);
371 if (xi->device_state != DEVICE_STATE_INACTIVE) {
372 for (i = 0; i <= 5 && xi->backend_state != XenbusStateInitialising && xi->backend_state != XenbusStateInitWait && xi->backend_state != XenbusStateInitialised; i++) {
373 FUNCTION_MSG("Waiting for XenbusStateInitXxx\n");
374 if (xi->backend_state == XenbusStateClosed) {
375 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateInitialising);
376 }
377 timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
378 KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
379 }
380 if (xi->backend_state != XenbusStateInitialising && xi->backend_state != XenbusStateInitWait && xi->backend_state != XenbusStateInitialised) {
381 FUNCTION_MSG("Backend state timeout\n");
382 return STATUS_UNSUCCESSFUL;
383 }
384 if (!NT_SUCCESS(status = XnBindEvent(xi->handle, &xi->event_channel, XenNet_HandleEvent_DIRQL, xi))) {
385 FUNCTION_MSG("Cannot allocate event channel\n");
386 return STATUS_UNSUCCESSFUL;
387 }
388 FUNCTION_MSG("event_channel = %d\n", xi->event_channel);
389 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "event-channel", xi->event_channel);
390 xi->tx_sring = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG);
391 if (!xi->tx_sring) {
392 FUNCTION_MSG("Cannot allocate tx_sring\n");
393 return STATUS_UNSUCCESSFUL;
394 }
395 SHARED_RING_INIT(xi->tx_sring);
396 FRONT_RING_INIT(&xi->tx_ring, xi->tx_sring, PAGE_SIZE);
397 pfn = (PFN_NUMBER)(MmGetPhysicalAddress(xi->tx_sring).QuadPart >> PAGE_SHIFT);
398 FUNCTION_MSG("tx sring pfn = %d\n", (ULONG)pfn);
399 xi->tx_sring_gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, INVALID_GRANT_REF, XENNET_POOL_TAG);
400 FUNCTION_MSG("tx sring_gref = %d\n", xi->tx_sring_gref);
401 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "tx-ring-ref", xi->tx_sring_gref);
402 xi->rx_sring = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG);
403 if (!xi->rx_sring) {
404 FUNCTION_MSG("Cannot allocate rx_sring\n");
405 return STATUS_UNSUCCESSFUL;
406 }
407 SHARED_RING_INIT(xi->rx_sring);
408 FRONT_RING_INIT(&xi->rx_ring, xi->rx_sring, PAGE_SIZE);
409 pfn = (PFN_NUMBER)(MmGetPhysicalAddress(xi->rx_sring).QuadPart >> PAGE_SHIFT);
410 FUNCTION_MSG("rx sring pfn = %d\n", (ULONG)pfn);
411 xi->rx_sring_gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, INVALID_GRANT_REF, XENNET_POOL_TAG);
412 FUNCTION_MSG("rx sring_gref = %d\n", xi->rx_sring_gref);
413 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "rx-ring-ref", xi->rx_sring_gref);
415 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "request-rx-copy", 1);
416 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "request-rx-notify", 1);
417 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "feature-no-csum-offload", !xi->frontend_csum_supported);
418 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "feature-sg", (int)xi->frontend_sg_supported);
419 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "feature-gso-tcpv4", !!xi->frontend_gso_value);
420 }
422 /* backend always supports checksum offload */
423 xi->backend_csum_supported = TRUE;
425 status = XnReadInt32(xi->handle, XN_BASE_BACKEND, "feature-sg", &tmp_ulong);
426 if (NT_SUCCESS(status) && tmp_ulong) {
427 xi->backend_sg_supported = TRUE;
428 } else {
429 xi->backend_sg_supported = FALSE;
430 }
431 status = XnReadInt32(xi->handle, XN_BASE_BACKEND, "feature-gso-tcpv4", &tmp_ulong);
432 if (NT_SUCCESS(status) && tmp_ulong) {
433 xi->backend_gso_value = xi->frontend_gso_value;
434 } else {
435 xi->backend_gso_value = FALSE;
436 }
438 status = XnReadString(xi->handle, XN_BASE_BACKEND, "mac", &tmp_string);
439 state = 0;
440 octet = 0;
441 for (i = 0; state != 3 && i < (int)strlen(tmp_string); i++) {
442 if (octet == 6) {
443 state = 3;
444 break;
445 }
446 switch(state) {
447 case 0:
448 case 1:
449 if (tmp_string[i] >= '0' && tmp_string[i] <= '9') {
450 xi->perm_mac_addr[octet] |= (tmp_string[i] - '0') << ((1 - state) * 4);
451 state++;
452 } else if (tmp_string[i] >= 'A' && tmp_string[i] <= 'F') {
453 xi->perm_mac_addr[octet] |= (tmp_string[i] - 'A' + 10) << ((1 - state) * 4);
454 state++;
455 } else if (tmp_string[i] >= 'a' && tmp_string[i] <= 'f') {
456 xi->perm_mac_addr[octet] |= (tmp_string[i] - 'a' + 10) << ((1 - state) * 4);
457 state++;
458 } else {
459 state = 3;
460 }
461 break;
462 case 2:
463 if (tmp_string[i] == ':') {
464 octet++;
465 state = 0;
466 } else {
467 state = 3;
468 }
469 break;
470 }
471 }
472 if (octet != 5 || state != 2) {
473 FUNCTION_MSG("Failed to parse backend MAC address %s\n", tmp_string);
474 XnFreeMem(xi->handle, tmp_string);
475 return STATUS_UNSUCCESSFUL;
476 } else if ((xi->curr_mac_addr[0] & 0x03) != 0x02) {
477 /* only copy if curr_mac_addr is not a LUA */
478 memcpy(xi->curr_mac_addr, xi->perm_mac_addr, ETH_ALEN);
479 }
480 XnFreeMem(xi->handle, tmp_string);
481 FUNCTION_MSG("MAC address is %02X:%02X:%02X:%02X:%02X:%02X\n",
482 xi->curr_mac_addr[0], xi->curr_mac_addr[1], xi->curr_mac_addr[2],
483 xi->curr_mac_addr[3], xi->curr_mac_addr[4], xi->curr_mac_addr[5]);
485 if (xi->device_state != DEVICE_STATE_INACTIVE) {
486 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateConnected);
488 for (i = 0; i <= 5 && xi->backend_state != XenbusStateConnected; i++) {
489 FUNCTION_MSG("Waiting for XenbusStateConnected\n");
490 timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
491 KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
492 }
493 if (xi->backend_state != XenbusStateConnected) {
494 FUNCTION_MSG("Backend state timeout\n");
495 return STATUS_UNSUCCESSFUL;
496 }
497 XenNet_TxInit(xi);
498 XenNet_RxInit(xi);
499 }
501 /* we don't set device_state = DEVICE_STATE_ACTIVE here - has to be done during init once ndis is ready */
503 return STATUS_SUCCESS;
504 }
506 NTSTATUS
507 XenNet_Disconnect(PVOID context, BOOLEAN suspend) {
508 struct xennet_info *xi = (struct xennet_info *)context;
509 //PFN_NUMBER pfn;
510 LARGE_INTEGER timeout;
511 NTSTATUS status;
513 if (xi->device_state != DEVICE_STATE_ACTIVE && xi->device_state != DEVICE_STATE_INACTIVE) {
514 FUNCTION_MSG("state not DEVICE_STATE_(IN)ACTIVE, is %d instead\n", xi->device_state);
515 FUNCTION_EXIT();
516 return STATUS_SUCCESS;
517 }
518 if (xi->device_state != DEVICE_STATE_INACTIVE) {
519 xi->device_state = DEVICE_STATE_DISCONNECTING;
520 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateClosing);
521 while (xi->backend_state != XenbusStateClosing && xi->backend_state != XenbusStateClosed) {
522 FUNCTION_MSG("Waiting for XenbusStateClosing/Closed\n");
523 timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
524 KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
525 }
526 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateClosed);
527 while (xi->backend_state != XenbusStateClosed) {
528 FUNCTION_MSG("Waiting for XenbusStateClosed\n");
529 timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
530 KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
531 }
532 XnUnbindEvent(xi->handle, xi->event_channel);
534 #if NTDDI_VERSION < WINXP
535 KeFlushQueuedDpcs();
536 #endif
537 XenNet_TxShutdown(xi);
538 XenNet_RxShutdown(xi);
539 XnEndAccess(xi->handle, xi->rx_sring_gref, FALSE, XENNET_POOL_TAG);
540 ExFreePoolWithTag(xi->rx_sring, XENNET_POOL_TAG);
541 XnEndAccess(xi->handle, xi->tx_sring_gref, FALSE, XENNET_POOL_TAG);
542 ExFreePoolWithTag(xi->tx_sring, XENNET_POOL_TAG);
543 }
544 if (!suspend) {
545 XnCloseDevice(xi->handle);
546 }
547 xi->device_state = DEVICE_STATE_DISCONNECTED;
548 return STATUS_SUCCESS;
549 }
551 VOID
552 XenNet_DeviceCallback(PVOID context, ULONG callback_type, PVOID value) {
553 struct xennet_info *xi = (struct xennet_info *)context;
554 ULONG state;
555 NTSTATUS status;
557 FUNCTION_ENTER();
558 switch (callback_type) {
559 case XN_DEVICE_CALLBACK_BACKEND_STATE:
560 state = (ULONG)(ULONG_PTR)value;
561 if (state == xi->backend_state) {
562 FUNCTION_MSG("same state %d\n", state);
563 FUNCTION_EXIT();
564 }
565 FUNCTION_MSG("XenBusState = %d -> %d\n", xi->backend_state, state);
566 xi->backend_state = state;
567 KeSetEvent(&xi->backend_event, 0, FALSE);
568 break;
569 case XN_DEVICE_CALLBACK_SUSPEND:
570 FUNCTION_MSG("XN_DEVICE_CALLBACK_SUSPEND");
571 XenNet_Disconnect(xi, TRUE);
572 break;
573 case XN_DEVICE_CALLBACK_RESUME:
574 FUNCTION_MSG("XN_DEVICE_CALLBACK_RESUME");
575 xi->device_state = DEVICE_STATE_INITIALISING;
576 status = XenNet_Connect(xi, TRUE);
577 // TODO: what to do here if not success?
578 if (xi->device_state != DEVICE_STATE_INACTIVE) {
579 xi->device_state = DEVICE_STATE_ACTIVE;
580 }
581 KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
582 break;
583 }
584 FUNCTION_EXIT();
585 }