net/mlx4: Remove improper usage of dma_alloc_coherent().
author    David Daney <david.daney@cavium.com>
          Mon, 6 Apr 2015 23:00:29 +0000 (16:00 -0700)
committer Julien Grall <julien.grall@citrix.com>
          Mon, 28 Sep 2015 11:05:11 +0000 (12:05 +0100)
The dma_alloc_coherent() function returns a virtual address which can
be used for coherent access to the underlying memory.  On some
architectures, like arm64, undefined behavior results if this memory is
also accessed via virtual mappings that are not coherent.  Because of
their undefined nature, operations like virt_to_page() return garbage
when passed virtual addresses obtained from dma_alloc_coherent().  Any
subsequent mappings via vmap() of the garbage page values are unusable
and result in bad things like bus errors (synchronous aborts in ARM64
speak).

The mlx4 driver contains code that does the equivalent of:

  vmap(virt_to_page(dma_alloc_coherent))

This results in an oops when the device is opened.
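
Spelled out as a minimal sketch (illustrative only, not the driver's
exact code; "dev" stands for the device's struct device):

  dma_addr_t dma_handle;
  struct page *page;
  void *cpu_addr, *alias;

  cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);

  /* Invalid on e.g. arm64: cpu_addr need not lie in the kernel linear
   * mapping, so virt_to_page() computes a garbage struct page pointer
   * and the vmap() alias built from it is unusable.
   */
  page  = virt_to_page(cpu_addr);
  alias = vmap(&page, 1, VM_MAP, PAGE_KERNEL);

  /* Valid: the CPU accesses the memory through cpu_addr itself, and
   * the device is handed dma_handle.
   */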

To fix this:

Always use result of dma_alloc_coherent() directly.

Remove 'max_direct' parameter to mlx4_buf_alloc(), as it is unused,
and adjust all callers.

Remove mlx4_en_map_buffer() and mlx4_en_unmap_buffer() as they now do
nothing, and adjust all callers.

Remove 'page_list' element from struct mlx4_buf as it is unused.
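
With these changes a caller allocates and uses a queue buffer roughly
as follows (a sketch condensed from the hunks below; locals and error
handling abbreviated):

  struct mlx4_buf buf;
  void *wqe;
  dma_addr_t dma;
  int err;

  err = mlx4_buf_alloc(dev, buf_size, &buf, GFP_KERNEL);
  if (err)
          return err;

  wqe = mlx4_buf_offset(&buf, 0); /* == buf.direct.buf, the coherent VA */
  dma = buf.direct.map;           /* matching bus address for the HCA */

  mlx4_buf_free(dev, buf_size, &buf);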

Signed-off-by: David Daney <david.daney@cavium.com>
Signed-off-by: Robert Richter <rrichter@cavium.com>
Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev@caviumnetworks.com>
12 files changed:
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx4/srq.c
drivers/net/ethernet/mellanox/mlx4/alloc.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_resources.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/mr.c
include/linux/mlx4/device.h

diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 36eb3d012b6d34ac96823cb193001afb42ae95ce..25e5aa5817d48e6645a9f494d69c90a5bfdb0b9f 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -102,7 +102,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
        int err;
 
        err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
-                            PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);
+                            &buf->buf, GFP_KERNEL);
 
        if (err)
                goto out;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c5a3a5f0de41f696a8eadd69f79ceecbad4628bd..baf9e117b44cf9ad5940cd64c62e712fd1dc1526 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -772,7 +772,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                        *qp->db.db = 0;
                }
 
-               if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) {
+               if (mlx4_buf_alloc(dev->dev, qp->buf_size, &qp->buf, gfp)) {
                        err = -ENOMEM;
                        goto err_db;
                }
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index dce5dfe3a70ea957a6780eca47e0c15e0d054300..121730b87eacf741bc643cd1bac90bc483d3380f 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -140,8 +140,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 
                *srq->db.db = 0;
 
-               if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
-                                  GFP_KERNEL)) {
+               if (mlx4_buf_alloc(dev->dev, buf_size, &srq->buf, GFP_KERNEL)) {
                        err = -ENOMEM;
                        goto err_db;
                }
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 0c51c69f802faf63a849300508ad4e809eeb7e3a..db6ba3e2b02e6a808dc1abc77fd73bcfd1cc760c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -576,103 +576,41 @@ out:
 
        return res;
 }
-/*
- * Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0.  If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
+
+/* Handling for queue buffers -- we allocate a bunch of memory and
+ * register it in a memory region at HCA virtual address 0.
  */
 
-int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
+int mlx4_buf_alloc(struct mlx4_dev *dev, int size,
                   struct mlx4_buf *buf, gfp_t gfp)
 {
        dma_addr_t t;
 
-       if (size <= max_direct) {
-               buf->nbufs        = 1;
-               buf->npages       = 1;
-               buf->page_shift   = get_order(size) + PAGE_SHIFT;
-               buf->direct.buf   = dma_alloc_coherent(&dev->persist->pdev->dev,
-                                                      size, &t, gfp);
-               if (!buf->direct.buf)
-                       return -ENOMEM;
-
-               buf->direct.map = t;
-
-               while (t & ((1 << buf->page_shift) - 1)) {
-                       --buf->page_shift;
-                       buf->npages *= 2;
-               }
+       buf->nbufs        = 1;
+       buf->npages       = 1;
+       buf->page_shift   = get_order(size) + PAGE_SHIFT;
+       buf->direct.buf   = dma_alloc_coherent(&dev->persist->pdev->dev,
+                                              size, &t, gfp);
+       if (!buf->direct.buf)
+               return -ENOMEM;
 
-               memset(buf->direct.buf, 0, size);
-       } else {
-               int i;
-
-               buf->direct.buf  = NULL;
-               buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-               buf->npages      = buf->nbufs;
-               buf->page_shift  = PAGE_SHIFT;
-               buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
-                                          gfp);
-               if (!buf->page_list)
-                       return -ENOMEM;
-
-               for (i = 0; i < buf->nbufs; ++i) {
-                       buf->page_list[i].buf =
-                               dma_alloc_coherent(&dev->persist->pdev->dev,
-                                                  PAGE_SIZE,
-                                                  &t, gfp);
-                       if (!buf->page_list[i].buf)
-                               goto err_free;
-
-                       buf->page_list[i].map = t;
-
-                       memset(buf->page_list[i].buf, 0, PAGE_SIZE);
-               }
+       buf->direct.map = t;
 
-               if (BITS_PER_LONG == 64) {
-                       struct page **pages;
-                       pages = kmalloc(sizeof *pages * buf->nbufs, gfp);
-                       if (!pages)
-                               goto err_free;
-                       for (i = 0; i < buf->nbufs; ++i)
-                               pages[i] = virt_to_page(buf->page_list[i].buf);
-                       buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
-                       kfree(pages);
-                       if (!buf->direct.buf)
-                               goto err_free;
-               }
+       while (t & ((1 << buf->page_shift) - 1)) {
+               --buf->page_shift;
+               buf->npages *= 2;
        }
 
-       return 0;
-
-err_free:
-       mlx4_buf_free(dev, size, buf);
+       memset(buf->direct.buf, 0, size);
 
-       return -ENOMEM;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
 
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
 {
-       int i;
-
-       if (buf->nbufs == 1)
-               dma_free_coherent(&dev->persist->pdev->dev, size,
-                                 buf->direct.buf,
-                                 buf->direct.map);
-       else {
-               if (BITS_PER_LONG == 64)
-                       vunmap(buf->direct.buf);
-
-               for (i = 0; i < buf->nbufs; ++i)
-                       if (buf->page_list[i].buf)
-                               dma_free_coherent(&dev->persist->pdev->dev,
-                                                 PAGE_SIZE,
-                                                 buf->page_list[i].buf,
-                                                 buf->page_list[i].map);
-               kfree(buf->page_list);
-       }
+       dma_free_coherent(&dev->persist->pdev->dev, size,
+                         buf->direct.buf, buf->direct.map);
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_free);
 
@@ -789,7 +727,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
 EXPORT_SYMBOL_GPL(mlx4_db_free);
 
 int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
-                      int size, int max_direct)
+                      int size)
 {
        int err;
 
@@ -799,7 +737,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
 
        *wqres->db.db = 0;
 
-       err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL);
+       err = mlx4_buf_alloc(dev, size, &wqres->buf, GFP_KERNEL);
        if (err)
                goto err_db;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 63769df872a42be81784eff223ef41a9e5a639e6..fa0e0b1ae3069baa613dd41860fc9381a43b0e0e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -73,22 +73,16 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
         */
        set_dev_node(&mdev->dev->persist->pdev->dev, node);
        err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
-                               cq->buf_size, 2 * PAGE_SIZE);
+                               cq->buf_size);
        set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
        if (err)
                goto err_cq;
 
-       err = mlx4_en_map_buffer(&cq->wqres.buf);
-       if (err)
-               goto err_res;
-
        cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
        *pcq = cq;
 
        return 0;
 
-err_res:
-       mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
 err_cq:
        kfree(cq);
        *pcq = NULL;
@@ -180,7 +174,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_cq *cq = *pcq;
 
-       mlx4_en_unmap_buffer(&cq->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
        if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
            cq->is_tx == RX)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e0de2fd1ce124d3d668659b89544d172164037f4..e2a489c0135fbde49914d019d3c05138ea53fe27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2895,7 +2895,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 
        /* Allocate page for receive rings */
        err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
-                               MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
+                               MLX4_EN_PAGE_SIZE);
        if (err) {
                en_err(priv, "Failed to allocate page for rx qps\n");
                goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index e482fa1bb7410e2eff0f2ed59948bd5fbd7decd8..e675dba1f662427592cd7ca614e8c35fbf49995b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -80,38 +80,6 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
        }
 }
 
-
-int mlx4_en_map_buffer(struct mlx4_buf *buf)
-{
-       struct page **pages;
-       int i;
-
-       if (BITS_PER_LONG == 64 || buf->nbufs == 1)
-               return 0;
-
-       pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
-       if (!pages)
-               return -ENOMEM;
-
-       for (i = 0; i < buf->nbufs; ++i)
-               pages[i] = virt_to_page(buf->page_list[i].buf);
-
-       buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
-       kfree(pages);
-       if (!buf->direct.buf)
-               return -ENOMEM;
-
-       return 0;
-}
-
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
-{
-       if (BITS_PER_LONG == 64 || buf->nbufs == 1)
-               return;
-
-       vunmap(buf->direct.buf);
-}
-
 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
 {
     return;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9c145dddd7175fffda22c71a8f31d4d27d807ceb..e36f3c62dc0c506c904472e32a49db56ed5ebf3b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -391,17 +391,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 
        /* Allocate HW buffers on provided NUMA node */
        set_dev_node(&mdev->dev->persist->pdev->dev, node);
-       err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
-                                ring->buf_size, 2 * PAGE_SIZE);
+       err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
        set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
        if (err)
                goto err_info;
 
-       err = mlx4_en_map_buffer(&ring->wqres.buf);
-       if (err) {
-               en_err(priv, "Failed to map RX buffer\n");
-               goto err_hwq;
-       }
        ring->buf = ring->wqres.buf.direct.buf;
 
        ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
@@ -409,8 +403,6 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
        *pring = ring;
        return 0;
 
-err_hwq:
-       mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
 err_info:
        vfree(ring->rx_info);
        ring->rx_info = NULL;
@@ -514,7 +506,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_rx_ring *ring = *pring;
 
-       mlx4_en_unmap_buffer(&ring->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
        vfree(ring->rx_info);
        ring->rx_info = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c10d98f6ad967b13640b5d9b2fe033f377565ff0..47dd7a0a8755686205bcbc4ff5f3d321d2c780dd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -93,20 +93,13 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 
        /* Allocate HW buffers on provided NUMA node */
        set_dev_node(&mdev->dev->persist->pdev->dev, node);
-       err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
-                                2 * PAGE_SIZE);
+       err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
        set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
        if (err) {
                en_err(priv, "Failed allocating hwq resources\n");
                goto err_bounce;
        }
 
-       err = mlx4_en_map_buffer(&ring->wqres.buf);
-       if (err) {
-               en_err(priv, "Failed to map TX buffer\n");
-               goto err_hwq_res;
-       }
-
        ring->buf = ring->wqres.buf.direct.buf;
 
        en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
@@ -117,7 +110,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                                    MLX4_RESERVE_ETH_BF_QP);
        if (err) {
                en_err(priv, "failed reserving qp for TX ring\n");
-               goto err_map;
+               goto err_hwq_res;
        }
 
        err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
@@ -154,8 +147,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 
 err_reserve:
        mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
-err_map:
-       mlx4_en_unmap_buffer(&ring->wqres.buf);
 err_hwq_res:
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
 err_bounce:
@@ -182,7 +173,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
        mlx4_qp_remove(mdev->dev, &ring->qp);
        mlx4_qp_free(mdev->dev, &ring->qp);
        mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
-       mlx4_en_unmap_buffer(&ring->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
        kfree(ring->bounce_buf);
        ring->bounce_buf = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 666d1669eb5233f9a8e6baf5773621159375af25..d34e78592f45acc5ab59063e2e8ec3435f041756 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -795,8 +795,6 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
                int is_tx, int rss, int qpn, int cqn, int user_prio,
                struct mlx4_qp_context *context);
 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
-int mlx4_en_map_buffer(struct mlx4_buf *buf);
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
 
 void mlx4_en_calc_rx_buf(struct net_device *dev);
 int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 78f51e103880d4dcae7745ec5cb5b2425e370e73..095f3ca2ba72b6339297b61bb0d8800ee5241c47 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -802,10 +802,7 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                return -ENOMEM;
 
        for (i = 0; i < buf->npages; ++i)
-               if (buf->nbufs == 1)
-                       page_list[i] = buf->direct.map + (i << buf->page_shift);
-               else
-                       page_list[i] = buf->page_list[i].map;
+               page_list[i] = buf->direct.map + (i << buf->page_shift);
 
        err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
 
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index fd13c1ce3b4abf797a4a720c9dd567587a477274..3d33739839d28a21aaf9563b2012acb073b6c765 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -595,7 +595,6 @@ struct mlx4_buf_list {
 
 struct mlx4_buf {
        struct mlx4_buf_list    direct;
-       struct mlx4_buf_list   *page_list;
        int                     nbufs;
        int                     npages;
        int                     page_shift;
@@ -1024,16 +1023,12 @@ static inline int mlx4_is_eth(struct mlx4_dev *dev, int port)
        return dev->caps.port_type[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
 }
 
-int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
+int mlx4_buf_alloc(struct mlx4_dev *dev, int size,
                   struct mlx4_buf *buf, gfp_t gfp);
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
 static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
 {
-       if (BITS_PER_LONG == 64 || buf->nbufs == 1)
-               return buf->direct.buf + offset;
-       else
-               return buf->page_list[offset >> PAGE_SHIFT].buf +
-                       (offset & (PAGE_SIZE - 1));
+       return buf->direct.buf + offset;
 }
 
 int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
@@ -1069,7 +1064,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order,
 void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
 
 int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
-                      int size, int max_direct);
+                      int size);
 void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
                       int size);