xenbits.xensource.com Git - unikraft/unikraft.git/commitdiff
plat/xen/drivers/net: Create netfront queues
authorCostin Lupu <costin.lupu@cs.pub.ro>
Tue, 17 Nov 2020 08:38:42 +0000 (10:38 +0200)
committerSimon Kuenzer <simon.kuenzer@neclab.eu>
Fri, 14 May 2021 13:50:10 +0000 (15:50 +0200)
We continue with the device configuration by retrieving the Xenstore
information regarding the number of queues and their associated event channels.
Netfront devices operate pairs of Rx/Tx queues and for notifications we can
either use a single event channel per pair or split event channels.

Signed-off-by: Costin Lupu <costin.lupu@cs.pub.ro>
Signed-off-by: Razvan Cojocaru <razvan.cojocaru93@gmail.com>
Reviewed-by: Sharan Santhanam <sharan.santhanam@neclab.eu>
plat/xen/drivers/net/netfront.c
plat/xen/drivers/net/netfront.h
plat/xen/drivers/net/netfront_xs.c

index ebd18b65b684c08e77fa1c4af16ee4f28ef402a8..92413bd007e0496c2a8c29dcbc340b1485e44ea7 100644 (file)
 
 static struct uk_alloc *drv_allocator;
 
+/* Allocate the Rx/Tx queue arrays for a netfront device.
+ *
+ * Netfront operates on queue pairs, so the configured Rx and Tx queue
+ * counts must match; max_queue_pairs is clamped to the requested count.
+ * Returns 0 on success, -ENOTSUP on mismatched counts, -ENOMEM on
+ * allocation failure. On failure no memory remains allocated.
+ */
+static int netfront_rxtx_alloc(struct netfront_dev *nfdev,
+               const struct uk_netdev_conf *conf)
+{
+       int rc = 0;
+
+       if (conf->nb_tx_queues != conf->nb_rx_queues) {
+               uk_pr_err("Different number of queues not supported\n");
+               rc = -ENOTSUP;
+               goto out;
+       }
+
+       nfdev->max_queue_pairs =
+               MIN(nfdev->max_queue_pairs, conf->nb_tx_queues);
+
+       nfdev->txqs = uk_calloc(drv_allocator,
+               nfdev->max_queue_pairs, sizeof(*nfdev->txqs));
+       if (unlikely(!nfdev->txqs)) {
+               uk_pr_err("Failed to allocate memory for tx queues\n");
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       nfdev->rxqs = uk_calloc(drv_allocator,
+               nfdev->max_queue_pairs, sizeof(*nfdev->rxqs));
+       if (unlikely(!nfdev->rxqs)) {
+               uk_pr_err("Failed to allocate memory for rx queues\n");
+               rc = -ENOMEM;
+               goto err_free_tx;
+       }
+
+       return 0;
+
+err_free_tx:
+       /* Only txqs can be live here; the previous version inverted the
+        * NULL checks, leaking txqs and calling uk_free() on NULL.
+        */
+       uk_free(drv_allocator, nfdev->txqs);
+       nfdev->txqs = NULL;
+out:
+       return rc;
+}
+
+/* uk_netdev configure callback: validate the requested queue layout and
+ * allocate the Rx/Tx queue arrays. Returns 0 on success or a negative
+ * errno value from netfront_rxtx_alloc().
+ */
+static int netfront_configure(struct uk_netdev *n,
+               const struct uk_netdev_conf *conf)
+{
+       struct netfront_dev *nfdev;
+       int rc;
+
+       UK_ASSERT(n != NULL);
+       UK_ASSERT(conf != NULL);
+
+       nfdev = to_netfront_dev(n);
+
+       rc = netfront_rxtx_alloc(nfdev, conf);
+       if (rc != 0)
+               uk_pr_err("Failed to allocate rx and tx rings %d\n", rc);
+
+       return rc;
+}
+
+/* uk_netdev info_get callback: report the device capabilities derived
+ * from the negotiated netfront state (queue pair count, MTU).
+ */
+static void netfront_info_get(struct uk_netdev *n,
+               struct uk_netdev_info *dev_info)
+{
+       struct netfront_dev *dev;
+
+       UK_ASSERT(n != NULL);
+       UK_ASSERT(dev_info != NULL);
+
+       dev = to_netfront_dev(n);
+
+       /* Rx and Tx queues always come in pairs on netfront */
+       dev_info->max_tx_queues = dev->max_queue_pairs;
+       dev_info->max_rx_queues = dev->max_queue_pairs;
+       dev_info->max_mtu = dev->mtu;
+       /* No extra headroom is reserved around packet buffers */
+       dev_info->nb_encap_tx = 0;
+       dev_info->nb_encap_rx = 0;
+       dev_info->ioalign = PAGE_SIZE;
+       dev_info->features = UK_FEATURE_RXQ_INTR_AVAILABLE;
+}
+
 static const void *netfront_einfo_get(struct uk_netdev *n,
                enum uk_netdev_einfo_type einfo_type)
 {
@@ -98,6 +178,8 @@ static unsigned int netfront_promisc_get(struct uk_netdev *n)
 }
 
 static const struct uk_netdev_ops netfront_ops = {
+       .configure = netfront_configure,
+       .info_get = netfront_info_get,
        .einfo_get = netfront_einfo_get,
        .hwaddr_get = netfront_mac_get,
        .mtu_get = netfront_mtu_get,
@@ -119,6 +201,7 @@ static int netfront_add_dev(struct xenbus_device *xendev)
 
        nfdev->xendev = xendev;
        nfdev->mtu = UK_ETH_PAYLOAD_MAXLEN;
+       nfdev->max_queue_pairs = 1;
 
        /* Xenbus initialization */
        rc = netfront_xb_init(nfdev, drv_allocator);
index 89a8028cc5e6f054a5a57b9cfc983ac30d195e52..4b1c6649f945746fec710603889d8ca481a1192f 100644 (file)
 
 #include <uk/netdev.h>
 
+/**
+ * Internal representation of a transmit queue. Currently an empty
+ * placeholder; members are presumably added by later commits that
+ * implement Tx ring setup — TODO confirm against follow-up patches.
+ */
+struct uk_netdev_tx_queue {
+};
+
+/**
+ * Internal representation of a receive queue. Currently an empty
+ * placeholder; members are presumably added by later commits that
+ * implement Rx ring setup — TODO confirm against follow-up patches.
+ */
+struct uk_netdev_rx_queue {
+};
+
 struct xs_econf {
        char *ipv4addr;
        char *ipv4mask;
@@ -48,6 +60,14 @@ struct netfront_dev {
        /* Network device */
        struct uk_netdev netdev;
 
+       /* List of the Rx/Tx queues */
+       struct uk_netdev_tx_queue *txqs;
+       struct uk_netdev_rx_queue *rxqs;
+       /* Maximum number of queue pairs */
+       uint16_t  max_queue_pairs;
+       /* True if using split event channels */
+       bool split_evtchn;
+
        /* Configuration parameters */
        struct xs_econf econf;
 
index 470ae1136a3f8fa28b5392d329834f73309dd2b3..bb15a14808222b4722aec638371e5114e2107349 100644 (file)
@@ -121,7 +121,7 @@ out_err:
 int netfront_xb_init(struct netfront_dev *nfdev, struct uk_alloc *a)
 {
        struct xenbus_device *xendev;
-       char *mac_str, *p, *ip_str;
+       char *mac_str, *p, *ip_str, *int_str;
        int rc;
 
        UK_ASSERT(nfdev != NULL);
@@ -173,7 +173,21 @@ int netfront_xb_init(struct netfront_dev *nfdev, struct uk_alloc *a)
                goto no_conf;
        free(ip_str);
 
-       /* TODO spit event channels */
+       /* maximum number of queues */
+       int_str = xs_read(XBT_NIL, xendev->otherend,
+               "multi-queue-max-queues");
+       if (!PTRISERR(int_str)) {
+               nfdev->max_queue_pairs = (uint16_t) strtoul(int_str, NULL, 10);
+               free(int_str);
+       }
+
+       /* split event channels */
+       int_str = xs_read(XBT_NIL, xendev->otherend,
+               "feature-split-event-channels");
+       if (!PTRISERR(int_str)) {
+               nfdev->split_evtchn = (bool) strtoul(int_str, NULL, 10);
+               free(int_str);
+       }
 
        /* TODO netmap */