static struct uk_alloc *drv_allocator;
+static int netfront_rxtx_alloc(struct netfront_dev *nfdev,
+ const struct uk_netdev_conf *conf)
+{
+ int rc = 0;
+
+ if (conf->nb_tx_queues != conf->nb_rx_queues) {
+ uk_pr_err("Different number of queues not supported\n");
+ rc = -ENOTSUP;
+ goto err_free_txrx;
+ }
+
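+ /* Use no more queue pairs than both the backend and the configuration allow */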
+ nfdev->max_queue_pairs =
+ MIN(nfdev->max_queue_pairs, conf->nb_tx_queues);
+
+ nfdev->txqs = uk_calloc(drv_allocator,
+ nfdev->max_queue_pairs, sizeof(*nfdev->txqs));
+ if (unlikely(!nfdev->txqs)) {
+ uk_pr_err("Failed to allocate memory for tx queues\n");
+ rc = -ENOMEM;
+ goto err_free_txrx;
+ }
+
+ nfdev->rxqs = uk_calloc(drv_allocator,
+ nfdev->max_queue_pairs, sizeof(*nfdev->rxqs));
+ if (unlikely(!nfdev->rxqs)) {
+ uk_pr_err("Failed to allocate memory for rx queues\n");
+ rc = -ENOMEM;
+ goto err_free_txrx;
+ }
+
+ return rc;
+
+err_free_txrx:
+ if (nfdev->rxqs)
+ uk_free(drv_allocator, nfdev->rxqs);
+ if (nfdev->txqs)
+ uk_free(drv_allocator, nfdev->txqs);
+
+ return rc;
+}
+
+static int netfront_configure(struct uk_netdev *n,
+ const struct uk_netdev_conf *conf)
+{
+ int rc;
+ struct netfront_dev *nfdev;
+
+ UK_ASSERT(n != NULL);
+ UK_ASSERT(conf != NULL);
+
+ nfdev = to_netfront_dev(n);
+
+ rc = netfront_rxtx_alloc(nfdev, conf);
+ if (rc != 0) {
+ uk_pr_err("Failed to allocate rx and tx rings %d\n", rc);
+ goto out;
+ }
+
+out:
+ return rc;
+}
+
+static void netfront_info_get(struct uk_netdev *n,
+ struct uk_netdev_info *dev_info)
+{
+ struct netfront_dev *nfdev;
+
+ UK_ASSERT(n != NULL);
+ UK_ASSERT(dev_info != NULL);
+
+ nfdev = to_netfront_dev(n);
+ dev_info->max_rx_queues = nfdev->max_queue_pairs;
+ dev_info->max_tx_queues = nfdev->max_queue_pairs;
+ dev_info->max_mtu = nfdev->mtu;
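+ /* No headroom is reserved for encapsulation on Tx or Rx */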
+ dev_info->nb_encap_tx = 0;
+ dev_info->nb_encap_rx = 0;
+ dev_info->ioalign = PAGE_SIZE;
+ dev_info->features = UK_FEATURE_RXQ_INTR_AVAILABLE;
+}
+
static const void *netfront_einfo_get(struct uk_netdev *n,
enum uk_netdev_einfo_type einfo_type)
{
}
static const struct uk_netdev_ops netfront_ops = {
+ .configure = netfront_configure,
+ .info_get = netfront_info_get,
.einfo_get = netfront_einfo_get,
.hwaddr_get = netfront_mac_get,
.mtu_get = netfront_mtu_get,
nfdev->xendev = xendev;
nfdev->mtu = UK_ETH_PAYLOAD_MAXLEN;
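+ /* Default to a single queue pair; xenstore may raise this limit later */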
+ nfdev->max_queue_pairs = 1;
/* Xenbus initialization */
rc = netfront_xb_init(nfdev, drv_allocator);
#include <uk/netdev.h>
+/**
+ * Internal structure to represent the transmit queue.
+ */
+struct uk_netdev_tx_queue {
+};
+
+/**
+ * Internal structure to represent the receive queue.
+ */
+struct uk_netdev_rx_queue {
+};
+
struct xs_econf {
char *ipv4addr;
char *ipv4mask;
/* Network device */
struct uk_netdev netdev;
+ /* List of the Rx/Tx queues */
+ struct uk_netdev_tx_queue *txqs;
+ struct uk_netdev_rx_queue *rxqs;
+ /* Maximum number of queue pairs */
+ uint16_t max_queue_pairs;
+ /* True if using split event channels */
+ bool split_evtchn;
+
/* Configuration parameters */
struct xs_econf econf;
int netfront_xb_init(struct netfront_dev *nfdev, struct uk_alloc *a)
{
struct xenbus_device *xendev;
- char *mac_str, *p, *ip_str;
+ char *mac_str, *p, *ip_str, *int_str;
int rc;
UK_ASSERT(nfdev != NULL);
goto no_conf;
free(ip_str);
- /* TODO spit event channels */
+ /* Maximum number of queues supported by the backend */
+ int_str = xs_read(XBT_NIL, xendev->otherend,
+ "multi-queue-max-queues");
+ if (!PTRISERR(int_str)) {
+ nfdev->max_queue_pairs = (uint16_t) strtoul(int_str, NULL, 10);
+ free(int_str);
+ }
+
+ /* Check whether the backend supports split event channels */
+ int_str = xs_read(XBT_NIL, xendev->otherend,
+ "feature-split-event-channels");
+ if (!PTRISERR(int_str)) {
+ nfdev->split_evtchn = (bool) strtoul(int_str, NULL, 10);
+ free(int_str);
+ }
/* TODO netmap */