ia64/linux-2.6.18-xen.hg

drivers/net/sfc/sfc_resource/efx_vi_shm.c @ 847:ad4d307bf9ce

net sfc: Update sfc and sfc_resource driver to latest release

...and update sfc_netfront, sfc_netback, sfc_netutil for any API changes

sfc_netback: Fix asymmetric use of SFC buffer table alloc and free
sfc_netback: Clean up if no SFC accel device found
sfc_netback: Gracefully handle case where page grant fails
sfc_netback: Disable net acceleration if the physical link goes down
sfc_netfront: Less verbose error messages, more verbose counters for rx discard errors
sfc_netfront: Gracefully handle case where SFC netfront fails during initialisation

Signed-off-by: Kieran Mansley <kmansley@solarflare.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Mar 31 11:59:10 2009 +0100 (2009-03-31)
parents e4dd072db259

/****************************************************************************
 * Driver for Solarflare network controllers -
 *          resource management for Xen backend, OpenOnload, etc
 *          (including support for SFE4001 10GBT NIC)
 *
 * This file provides implementation of EFX VI API, used from Xen
 * acceleration driver.
 *
 * Copyright 2005-2007: Solarflare Communications Inc,
 *                      9501 Jeronimo Road, Suite 250,
 *                      Irvine, CA 92618, USA
 *
 * Developed and maintained by Solarflare Communications:
 *                      <linux-xen-drivers@solarflare.com>
 *                      <onload-dev@solarflare.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 ****************************************************************************
 */

#include "linux_resource_internal.h"
#include <ci/efrm/nic_table.h>
#include <ci/efrm/vi_resource_manager.h>
#include <ci/driver/resource/efx_vi.h>
#include <ci/efrm/filter.h>
#include <ci/efrm/buffer_table.h>
#include <linux/pci.h>
#include "kernel_compat.h"

#if EFX_VI_STATIC_FILTERS
struct filter_list_t {
        struct filter_list_t *next;
        struct filter_resource *fres;
};
#endif

struct efx_vi_state {
        struct vi_resource *vi_res;

        int nic_index;

        void (*callback_fn)(void *arg, int is_timeout);
        void *callback_arg;

        struct completion flush_completion;

#if EFX_VI_STATIC_FILTERS
        struct filter_list_t fres[EFX_VI_STATIC_FILTERS];
        struct filter_list_t *free_fres;
        struct filter_list_t *used_fres;
#endif
};
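
/* Flush-completion callback, registered with the VI resource by alloc_ep();
 * wakes the waiter in efx_vi_reset() once the NIC has flushed this VI. */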
static void efx_vi_flush_complete(void *state_void)
{
        struct efx_vi_state *state = (struct efx_vi_state *)state_void;

        complete(&state->flush_completion);
}

static inline int alloc_ep(struct efx_vi_state *state)
{
        int rc;

        rc = efrm_vi_resource_alloc(NULL, EFHW_VI_JUMBO_EN,
                                    efx_vi_eventq_size,
                                    FALCON_DMA_Q_DEFAULT_TX_SIZE,
                                    FALCON_DMA_Q_DEFAULT_RX_SIZE,
                                    0, 0, &state->vi_res, NULL, NULL, NULL,
                                    NULL);
        if (rc < 0) {
                EFRM_ERR("%s: ERROR efrm_vi_resource_alloc error %d",
                         __FUNCTION__, rc);
                return rc;
        }

        efrm_vi_register_flush_callback(state->vi_res, &efx_vi_flush_complete,
                                        (void *)state);

        return 0;
}

static int free_ep(struct efx_vi_state *efx_state)
{
        efrm_vi_resource_release(efx_state->vi_res);

        return 0;
}
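
/* Pre-allocate EFX_VI_STATIC_FILTERS filter resources and thread them onto
 * the free list.  get_filter()/release_filter() below then just move entries
 * between the free and used lists, so efx_vi_filter() never needs to
 * allocate and can be called from atomic context. */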
#if EFX_VI_STATIC_FILTERS
static int efx_vi_alloc_static_filters(struct efx_vi_state *efx_state)
{
        int i;
        int rc;

        efx_state->free_fres = efx_state->used_fres = NULL;

        for (i = 0; i < EFX_VI_STATIC_FILTERS; i++) {
                rc = efrm_filter_resource_alloc(efx_state->vi_res,
                                                &efx_state->fres[i].fres);
                if (rc < 0) {
                        EFRM_ERR("%s: efrm_filter_resource_alloc failed: %d",
                                 __FUNCTION__, rc);
                        while (i > 0) {
                                i--;
                                efrm_filter_resource_release(efx_state->
                                                             fres[i].fres);
                        }
                        efx_state->free_fres = NULL;
                        return rc;
                }
                efx_state->fres[i].next = efx_state->free_fres;
                efx_state->free_fres = &efx_state->fres[i];
        }

        return 0;
}
#endif
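
/* Allocate the per-VI state: the VI resource itself, the flush completion
 * used by efx_vi_reset(), and (if configured) the static filter pool. */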
int efx_vi_alloc(struct efx_vi_state **vih_out, int nic_index)
{
        struct efx_vi_state *efx_state;
        int rc;

        BUG_ON(nic_index < 0 || nic_index >= EFHW_MAX_NR_DEVS);

        efx_state = kmalloc(sizeof(struct efx_vi_state), GFP_KERNEL);

        if (!efx_state) {
                EFRM_ERR("%s: failed to allocate memory for efx_vi_state",
                         __FUNCTION__);
                rc = -ENOMEM;
                goto fail;
        }

        efx_state->nic_index = nic_index;
        init_completion(&efx_state->flush_completion);

        /* basically allocate_pt_endpoint() */
        rc = alloc_ep(efx_state);
        if (rc) {
                EFRM_ERR("%s: alloc_ep failed: %d", __FUNCTION__, rc);
                goto fail_no_pt;
        }
#if EFX_VI_STATIC_FILTERS
        /* Statically allocate a set of filter resources - removes the
           restriction on not being able to use efx_vi_filter() from
           in_atomic() */
        rc = efx_vi_alloc_static_filters(efx_state);
        if (rc)
                goto fail_no_filters;
#endif

        *vih_out = efx_state;

        return 0;

#if EFX_VI_STATIC_FILTERS
fail_no_filters:
        free_ep(efx_state);
#endif
fail_no_pt:
        kfree(efx_state);
fail:
        return rc;
}
EXPORT_SYMBOL(efx_vi_alloc);

void efx_vi_free(struct efx_vi_state *vih)
{
        struct efx_vi_state *efx_state = vih;

        /* TODO flush dma channels, init dma queues?.  See ef_free_vnic() */
#if EFX_VI_STATIC_FILTERS
        int i;

        for (i = 0; i < EFX_VI_STATIC_FILTERS; i++)
                efrm_filter_resource_release(efx_state->fres[i].fres);
#endif

        if (efx_state->vi_res)
                free_ep(efx_state);

        kfree(efx_state);
}
EXPORT_SYMBOL(efx_vi_free);
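
/* Flush the VI's DMA queues and wait for the flush to complete, retrying
 * every second until the hardware responds, then reinitialise the event
 * queue. */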
void efx_vi_reset(struct efx_vi_state *vih)
{
        struct efx_vi_state *efx_state = vih;

        efrm_pt_flush(efx_state->vi_res);

        while (wait_for_completion_timeout(&efx_state->flush_completion, HZ)
               == 0)
                efrm_vi_resource_flush_retry(efx_state->vi_res);

        /* Bosch the eventq */
        efrm_eventq_reset(efx_state->vi_res, 0);

        return;
}
EXPORT_SYMBOL(efx_vi_reset);

static void
efx_vi_eventq_callback(void *context, int is_timeout, struct efhw_nic *nic)
{
        struct efx_vi_state *efx_state = (struct efx_vi_state *)context;

        EFRM_ASSERT(efx_state->callback_fn);

        return efx_state->callback_fn(efx_state->callback_arg, is_timeout);
}

int
efx_vi_eventq_register_callback(struct efx_vi_state *vih,
                                void (*callback)(void *context, int is_timeout),
                                void *context)
{
        struct efx_vi_state *efx_state = vih;

        efx_state->callback_fn = callback;
        efx_state->callback_arg = context;

        /* Register the eventq timeout event callback */
        efrm_eventq_register_callback(efx_state->vi_res,
                                      efx_vi_eventq_callback, efx_state);

        return 0;
}
EXPORT_SYMBOL(efx_vi_eventq_register_callback);

int efx_vi_eventq_kill_callback(struct efx_vi_state *vih)
{
        struct efx_vi_state *efx_state = vih;

        if (efx_state->vi_res->evq_callback_fn)
                efrm_eventq_kill_callback(efx_state->vi_res);

        efx_state->callback_fn = NULL;
        efx_state->callback_arg = NULL;

        return 0;
}
EXPORT_SYMBOL(efx_vi_eventq_kill_callback);
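
/* State for one DMA mapping: the buffer-table region backing the pages, the
 * page count, and the bus addresses needed to unmap the pages later. */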
struct efx_vi_dma_map_state {
        struct efhw_buffer_table_allocation bt_handle;
        int n_pages;
        dma_addr_t *dma_addrs;
};
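
/* Map a power-of-two run of pages for DMA and program each bus address into
 * a freshly allocated buffer-table region, tagged with this VI's instance. */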
int
efx_vi_dma_map_pages(struct efx_vi_state *vih, struct page **pages,
                     int n_pages, struct efx_vi_dma_map_state **dmh_out)
{
        struct efx_vi_state *efx_state = vih;
        int order = fls(n_pages - 1), rc, i, evq_id;
        dma_addr_t dma_addr;
        struct efx_vi_dma_map_state *dm_state;

        if (n_pages != (1 << order)) {
                EFRM_WARN("%s: Can only allocate buffers in power of 2 "
                          "sizes (not %d)", __FUNCTION__, n_pages);
                return -EINVAL;
        }

        dm_state = kmalloc(sizeof(struct efx_vi_dma_map_state), GFP_KERNEL);
        if (!dm_state)
                return -ENOMEM;

        dm_state->dma_addrs = kmalloc(sizeof(dma_addr_t) * n_pages,
                                      GFP_KERNEL);
        if (!dm_state->dma_addrs) {
                kfree(dm_state);
                return -ENOMEM;
        }

        rc = efrm_buffer_table_alloc(order, &dm_state->bt_handle);
        if (rc < 0) {
                kfree(dm_state->dma_addrs);
                kfree(dm_state);
                return rc;
        }

        evq_id = EFRM_RESOURCE_INSTANCE(efx_state->vi_res->rs.rs_handle);
        for (i = 0; i < n_pages; i++) {
                /* TODO do we need to get_page() here ? */

                dma_addr = pci_map_page
                    (linux_efhw_nic(efrm_nic_tablep->nic[efx_state->nic_index])->
                     pci_dev, pages[i], 0, PAGE_SIZE, PCI_DMA_TODEVICE);

                efrm_buffer_table_set(&dm_state->bt_handle, i, dma_addr,
                                      evq_id);

                dm_state->dma_addrs[i] = dma_addr;

                /* Would be nice to not have to call commit each time, but
                 * comment says there are hardware restrictions on how often
                 * you can go without it, so do this to be safe */
                efrm_buffer_table_commit();
        }

        dm_state->n_pages = n_pages;

        *dmh_out = dm_state;

        return 0;
}
EXPORT_SYMBOL(efx_vi_dma_map_pages);

/* Function needed as Xen can't get pages for grants in dom0, but can
   get dma address */
int
efx_vi_dma_map_addrs(struct efx_vi_state *vih,
                     unsigned long long *bus_dev_addrs,
                     int n_pages, struct efx_vi_dma_map_state **dmh_out)
{
        struct efx_vi_state *efx_state = vih;
        int order = fls(n_pages - 1), rc, i, evq_id;
        dma_addr_t dma_addr;
        struct efx_vi_dma_map_state *dm_state;

        if (n_pages != (1 << order)) {
                EFRM_WARN("%s: Can only allocate buffers in power of 2 "
                          "sizes (not %d)", __FUNCTION__, n_pages);
                return -EINVAL;
        }

        dm_state = kmalloc(sizeof(struct efx_vi_dma_map_state), GFP_KERNEL);
        if (!dm_state)
                return -ENOMEM;

        dm_state->dma_addrs = kmalloc(sizeof(dma_addr_t) * n_pages,
                                      GFP_KERNEL);
        if (!dm_state->dma_addrs) {
                kfree(dm_state);
                return -ENOMEM;
        }

        rc = efrm_buffer_table_alloc(order, &dm_state->bt_handle);
        if (rc < 0) {
                kfree(dm_state->dma_addrs);
                kfree(dm_state);
                return rc;
        }

        evq_id = EFRM_RESOURCE_INSTANCE(efx_state->vi_res->rs.rs_handle);
#if 0
        EFRM_WARN("%s: mapping %d pages to evq %d, bt_ids %d-%d\n",
                  __FUNCTION__, n_pages, evq_id,
                  dm_state->bt_handle.base,
                  dm_state->bt_handle.base + n_pages);
#endif
        for (i = 0; i < n_pages; i++) {

                dma_addr = (dma_addr_t)bus_dev_addrs[i];

                efrm_buffer_table_set(&dm_state->bt_handle, i, dma_addr,
                                      evq_id);

                dm_state->dma_addrs[i] = dma_addr;

                /* Would be nice to not have to call commit each time, but
                 * comment says there are hardware restrictions on how often
                 * you can go without it, so do this to be safe */
                efrm_buffer_table_commit();
        }

        dm_state->n_pages = n_pages;

        *dmh_out = dm_state;

        return 0;
}
EXPORT_SYMBOL(efx_vi_dma_map_addrs);

void
efx_vi_dma_unmap_pages(struct efx_vi_state *vih,
                       struct efx_vi_dma_map_state *dmh)
{
        struct efx_vi_state *efx_state = vih;
        struct efx_vi_dma_map_state *dm_state =
            (struct efx_vi_dma_map_state *)dmh;
        int i;

        efrm_buffer_table_free(&dm_state->bt_handle);

        for (i = 0; i < dm_state->n_pages; ++i)
                pci_unmap_page(linux_efhw_nic
                               (efrm_nic_tablep->nic[efx_state->nic_index])->pci_dev,
                               dm_state->dma_addrs[i], PAGE_SIZE,
                               PCI_DMA_TODEVICE);

        kfree(dm_state->dma_addrs);
        kfree(dm_state);

        return;
}
EXPORT_SYMBOL(efx_vi_dma_unmap_pages);

void
efx_vi_dma_unmap_addrs(struct efx_vi_state *vih,
                       struct efx_vi_dma_map_state *dmh)
{
        struct efx_vi_dma_map_state *dm_state =
            (struct efx_vi_dma_map_state *)dmh;

        efrm_buffer_table_free(&dm_state->bt_handle);

        kfree(dm_state->dma_addrs);
        kfree(dm_state);

        return;
}
EXPORT_SYMBOL(efx_vi_dma_unmap_addrs);

unsigned
efx_vi_dma_get_map_addr(struct efx_vi_state *vih,
                        struct efx_vi_dma_map_state *dmh)
{
        struct efx_vi_dma_map_state *dm_state =
            (struct efx_vi_dma_map_state *)dmh;

        return EFHW_BUFFER_ADDR(dm_state->bt_handle.base, 0);
}
EXPORT_SYMBOL(efx_vi_dma_get_map_addr);
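
/* Take a filter from the pre-allocated free list; returns -ENOMEM when the
 * list is exhausted rather than allocating, so it is safe to call from
 * atomic context. */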
#if EFX_VI_STATIC_FILTERS
static int
get_filter(struct efx_vi_state *efx_state,
           efrm_resource_handle_t pthandle, struct filter_resource **fres_out)
{
        struct filter_list_t *flist;
        if (efx_state->free_fres == NULL)
                return -ENOMEM;
        else {
                flist = efx_state->free_fres;
                efx_state->free_fres = flist->next;
                flist->next = efx_state->used_fres;
                efx_state->used_fres = flist;
                *fres_out = flist->fres;
                return 0;
        }
}
#endif

static void
release_filter(struct efx_vi_state *efx_state, struct filter_resource *fres)
{
#if EFX_VI_STATIC_FILTERS
        struct filter_list_t *flist = efx_state->used_fres, *prev = NULL;
        while (flist) {
                if (flist->fres == fres) {
                        if (prev)
                                prev->next = flist->next;
                        else
                                efx_state->used_fres = flist->next;
                        flist->next = efx_state->free_fres;
                        efx_state->free_fres = flist;
                        return;
                }
                prev = flist;
                flist = flist->next;
        }
        EFRM_ERR("%s: couldn't find filter", __FUNCTION__);
#else
        return efrm_filter_resource_release(fres);
#endif
}

int
efx_vi_filter(struct efx_vi_state *vih, int protocol,
              unsigned ip_addr_be32, int port_le16,
              struct filter_resource_t **fh_out)
{
        struct efx_vi_state *efx_state = vih;
        struct filter_resource *frs;
        int rc;

#if EFX_VI_STATIC_FILTERS
        rc = get_filter(efx_state, efx_state->vi_res->rs.rs_handle, &frs);
#else
        rc = efrm_filter_resource_alloc(efx_state->vi_res, &frs);
#endif
        if (rc < 0)
                return rc;

        /* Add the hardware filter.  We pass in the source port and address
         * as 0 (wildcard) to minimise the number of filters needed. */
        if (protocol == IPPROTO_TCP) {
                rc = efrm_filter_resource_tcp_set(frs, 0, 0, ip_addr_be32,
                                                  port_le16);
        } else {
                rc = efrm_filter_resource_udp_set(frs, 0, 0, ip_addr_be32,
                                                  port_le16);
        }

        *fh_out = (struct filter_resource_t *)frs;

        return rc;
}
EXPORT_SYMBOL(efx_vi_filter);

int
efx_vi_filter_stop(struct efx_vi_state *vih, struct filter_resource_t *fh)
{
        struct efx_vi_state *efx_state = vih;
        struct filter_resource *frs = (struct filter_resource *)fh;
        int rc;

        rc = efrm_filter_resource_clear(frs);
        release_filter(efx_state, frs);

        return rc;
}
EXPORT_SYMBOL(efx_vi_filter_stop);

int
efx_vi_hw_resource_get_virt(struct efx_vi_state *vih,
                            struct efx_vi_hw_resource_metadata *mdata,
                            struct efx_vi_hw_resource *hw_res_array,
                            int *length)
{
        EFRM_NOTICE("%s: TODO!", __FUNCTION__);

        return 0;
}
EXPORT_SYMBOL(efx_vi_hw_resource_get_virt);
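
/* Report the physical addresses of this VI's hardware resources (DMA queues,
 * event queue, doorbell page) so the Xen frontend can map them directly.
 * Falcon-specific.  On entry *length gives the size of hw_res_array; on
 * return it is set to the number of entries filled in. */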
#if defined(__CI_HARDWARE_CONFIG_FALCON__)
int
efx_vi_hw_resource_get_phys(struct efx_vi_state *vih,
                            struct efx_vi_hw_resource_metadata *mdata,
                            struct efx_vi_hw_resource *hw_res_array,
                            int *length)
{
        struct efx_vi_state *efx_state = vih;
        int i, ni = efx_state->nic_index;
        struct linux_efhw_nic *lnic = linux_efhw_nic(efrm_nic_tablep->nic[ni]);
        unsigned long phys = lnic->ctr_ap_pci_addr;
        struct efrm_resource *ep_res = &efx_state->vi_res->rs;
        unsigned ep_mmap_bytes;

        if (*length < EFX_VI_HW_RESOURCE_MAXSIZE)
                return -EINVAL;

        mdata->nic_arch = efrm_nic_tablep->nic[ni]->devtype.arch;
        mdata->nic_variant = efrm_nic_tablep->nic[ni]->devtype.variant;
        mdata->nic_revision = efrm_nic_tablep->nic[ni]->devtype.revision;

        mdata->evq_order =
            efx_state->vi_res->nic_info[ni].evq_pages.iobuff.order;
        mdata->evq_offs = efx_state->vi_res->nic_info[ni].evq_pages.iobuff_off;
        mdata->evq_capacity = efx_vi_eventq_size;
        mdata->instance = EFRM_RESOURCE_INSTANCE(ep_res->rs_handle);
        mdata->rx_capacity = FALCON_DMA_Q_DEFAULT_RX_SIZE;
        mdata->tx_capacity = FALCON_DMA_Q_DEFAULT_TX_SIZE;

        ep_mmap_bytes = FALCON_DMA_Q_DEFAULT_MMAP;
        EFRM_ASSERT(ep_mmap_bytes == PAGE_SIZE * 2);

#ifndef NDEBUG
        {
                /* Sanity about doorbells */
                unsigned long tx_dma_page_addr, rx_dma_page_addr;

                /* get rx doorbell address */
                rx_dma_page_addr =
                    phys + falcon_rx_dma_page_addr(mdata->instance);
                /* get tx doorbell address */
                tx_dma_page_addr =
                    phys + falcon_tx_dma_page_addr(mdata->instance);

                /* Check the lower bits of the TX doorbell will be
                 * consistent. */
                EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST &
                             FALCON_DMA_PAGE_MASK) ==
                            (TX_DESC_UPD_REG_PAGE123K_OFST &
                             FALCON_DMA_PAGE_MASK));

                /* Check the lower bits of the RX doorbell will be
                 * consistent. */
                EFRM_ASSERT((RX_DESC_UPD_REG_PAGE4_OFST &
                             FALCON_DMA_PAGE_MASK) ==
                            (RX_DESC_UPD_REG_PAGE123K_OFST &
                             FALCON_DMA_PAGE_MASK));

                /* Check that the doorbells will be in the same page. */
                EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST & PAGE_MASK) ==
                            (RX_DESC_UPD_REG_PAGE4_OFST & PAGE_MASK));

                /* Check that the doorbells are in the same page. */
                EFRM_ASSERT((tx_dma_page_addr & PAGE_MASK) ==
                            (rx_dma_page_addr & PAGE_MASK));

                /* Check that the TX doorbell offset is correct. */
                EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST & ~PAGE_MASK) ==
                            (tx_dma_page_addr & ~PAGE_MASK));

                /* Check that the RX doorbell offset is correct. */
                EFRM_ASSERT((RX_DESC_UPD_REG_PAGE4_OFST & ~PAGE_MASK) ==
                            (rx_dma_page_addr & ~PAGE_MASK));
        }
#endif

        i = 0;
        hw_res_array[i].type = EFX_VI_HW_RESOURCE_TXDMAQ;
        hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
        hw_res_array[i].more_to_follow = 0;
        hw_res_array[i].length = PAGE_SIZE;
        hw_res_array[i].address =
            (unsigned long)efx_state->vi_res->nic_info[ni].
            dmaq_pages[EFRM_VI_RM_DMA_QUEUE_TX].kva;

        i++;
        hw_res_array[i].type = EFX_VI_HW_RESOURCE_RXDMAQ;
        hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
        hw_res_array[i].more_to_follow = 0;
        hw_res_array[i].length = PAGE_SIZE;
        hw_res_array[i].address =
            (unsigned long)efx_state->vi_res->nic_info[ni].
            dmaq_pages[EFRM_VI_RM_DMA_QUEUE_RX].kva;

        i++;
        hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQTIMER;
        hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
        hw_res_array[i].more_to_follow = 0;
        hw_res_array[i].length = PAGE_SIZE;
        hw_res_array[i].address =
            (unsigned long)phys + falcon_timer_page_addr(mdata->instance);

        /* NB EFX_VI_HW_RESOURCE_EVQPTR not used on Falcon */

        i++;
        switch (efrm_nic_tablep->nic[ni]->devtype.variant) {
        case 'A':
                hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQRPTR;
                hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
                hw_res_array[i].more_to_follow = 0;
                hw_res_array[i].length = PAGE_SIZE;
                hw_res_array[i].address = (unsigned long)phys +
                    EVQ_RPTR_REG_OFST +
                    (FALCON_REGISTER128 * mdata->instance);
                break;
        case 'B':
                hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQRPTR_OFFSET;
                hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
                hw_res_array[i].more_to_follow = 0;
                hw_res_array[i].length = PAGE_SIZE;
                hw_res_array[i].address =
                    (unsigned long)FALCON_EVQ_RPTR_REG_P0;
                break;
        default:
                EFRM_ASSERT(0);
                break;
        }

        i++;
        hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQMEMKVA;
        hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_IOBUFFER;
        hw_res_array[i].more_to_follow = 0;
        hw_res_array[i].length = PAGE_SIZE;
        hw_res_array[i].address = (unsigned long)efx_state->vi_res->
            nic_info[ni].evq_pages.iobuff.kva;

        i++;
        hw_res_array[i].type = EFX_VI_HW_RESOURCE_BELLPAGE;
        hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
        hw_res_array[i].more_to_follow = 0;
        hw_res_array[i].length = PAGE_SIZE;
        hw_res_array[i].address =
            (unsigned long)(phys +
                            falcon_tx_dma_page_addr(mdata->instance))
            >> PAGE_SHIFT;

        i++;

        EFRM_ASSERT(i <= *length);

        *length = i;

        return 0;
}
EXPORT_SYMBOL(efx_vi_hw_resource_get_phys);
#endif