Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
author		Linus Torvalds <torvalds@linux-foundation.org>
		Sat, 29 Mar 2025 18:12:28 +0000 (11:12 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Sat, 29 Mar 2025 18:12:28 +0000 (11:12 -0700)
Pull rdma updates from Jason Gunthorpe:

 - Usual minor updates and fixes for bnxt_re, hfi1, rxe, mana, iser,
   mlx5, vmw_pvrdma, hns

 - Make rxe work on tun devices

 - mana gains more standard verbs as it moves toward supporting
   in-kernel verbs

 - DMABUF support for mana

 - Fix page size calculations when memory registration exceeds 4G

 - On Demand Paging support for rxe (see the sketch after this list)

 - mlx5 support for RDMA TRANSPORT flow tables, plus a new ucap
   mechanism to control access to them

 - Optional RDMA_TX/RX counters per QP in mlx5

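A userspace-side sketch of what the rxe ODP entry above enables, assuming a
libibverbs consumer with an existing PD; the helper name and access mask are
illustrative, IBV_ACCESS_ON_DEMAND is the standard verbs flag:

	#include <infiniband/verbs.h>

	/* Register an on-demand-paging MR; returns NULL (errno set) if
	 * the device rejects ODP for this access mask. */
	static struct ibv_mr *reg_odp_mr(struct ibv_pd *pd, void *addr,
					 size_t len)
	{
		return ibv_reg_mr(pd, addr, len,
				  IBV_ACCESS_LOCAL_WRITE |
				  IBV_ACCESS_REMOTE_WRITE |
				  IBV_ACCESS_ON_DEMAND);
	}
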
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (73 commits)
  IB/mad: Check available slots before posting receive WRs
  RDMA/mana_ib: Fix integer overflow during queue creation
  RDMA/mlx5: Fix calculation of total invalidated pages
  RDMA/mlx5: Fix mlx5_poll_one() cur_qp update flow
  RDMA/mlx5: Fix page_size variable overflow
  RDMA/mlx5: Drop access_flags from _mlx5_mr_cache_alloc()
  RDMA/mlx5: Fix cache entry update on dereg error
  RDMA/mlx5: Fix MR cache initialization error flow
  RDMA/mlx5: Support optional-counters binding for QPs
  RDMA/mlx5: Compile fs.c regardless of INFINIBAND_USER_ACCESS config
  RDMA/core: Pass port to counter bind/unbind operations
  RDMA/core: Add support to optional-counters binding configuration
  RDMA/core: Create and destroy rdma_counter using rdma_zalloc_drv_obj()
  RDMA/mlx5: Add optional counters for RDMA_TX/RX_packets/bytes
  RDMA/core: Fix use-after-free when rename device name
  RDMA/bnxt_re: Support perf management counters
  RDMA/rxe: Fix incorrect return value of rxe_odp_atomic_op()
  RDMA/uverbs: Propagate errors from rdma_lookup_get_uobject()
  RDMA/mana_ib: Handle net event for pointing to the current netdev
  net: mana: Change the function signature of mana_get_primary_netdev_rcu
  ...

16 files changed:
drivers/infiniband/hw/bnxt_re/bnxt_re.h
drivers/infiniband/hw/bnxt_re/hw_counters.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/hns/hns_roce_qp.c
drivers/infiniband/hw/irdma/main.h
drivers/infiniband/hw/mana/main.c
drivers/infiniband/hw/mlx5/counters.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/sw/rxe/rxe.c
drivers/infiniband/sw/siw/siw.h
drivers/net/ethernet/microsoft/mana/gdma_main.c
drivers/net/ethernet/microsoft/mana/mana_en.c
include/linux/mlx5/device.h
include/net/mana/gdma.h

Simple merge
Simple merge
Simple merge
index 81cfa74147a18351fa2a42c8fb842d8579de2c98,a37242d308ff6187623c1fee44c7d47a9087a1a5..b847084dcd9986ab78a13ae436b1da4e87387888
@@@ -543,10 -610,9 +610,10 @@@ static int mlx5_ib_counter_dealloc(stru
  }
  
  static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
-                                  struct ib_qp *qp)
+                                  struct ib_qp *qp, u32 port)
  {
        struct mlx5_ib_dev *dev = to_mdev(qp->device);
 +      bool new = false;
        int err;
  
        if (!counter->id) {
        if (err)
                goto fail_set_counter;
  
+       err = mlx5r_fs_bind_op_fc(qp, counter, port);
+       if (err)
+               goto fail_bind_op_fc;
        return 0;
  
+ fail_bind_op_fc:
+       mlx5_ib_qp_set_counter(qp, NULL);
  fail_set_counter:
 -      mlx5_ib_counter_dealloc(counter);
 -      counter->id = 0;
 +      if (new) {
 +              mlx5_ib_counter_dealloc(counter);
 +              counter->id = 0;
 +      }
  
        return err;
  }
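
The combined diff elides the middle of the hunk above; its shape is the usual
kernel goto-unwind, reconstructed here as a sketch with one hypothetical
helper (alloc_counter stands in for the elided allocation path):

	static int bind_sketch(struct rdma_counter *counter,
			       struct ib_qp *qp, u32 port)
	{
		bool new = false;
		int err;

		if (!counter->id) {	/* first binder allocates */
			err = alloc_counter(counter);	/* hypothetical */
			if (err)
				return err;
			new = true;
		}

		err = mlx5_ib_qp_set_counter(qp, counter);
		if (err)
			goto fail_set_counter;

		err = mlx5r_fs_bind_op_fc(qp, counter, port);
		if (err)
			goto fail_bind_op_fc;
		return 0;

	fail_bind_op_fc:
		mlx5_ib_qp_set_counter(qp, NULL);
	fail_set_counter:
		if (new) {	/* only undo what this call allocated */
			mlx5_ib_counter_dealloc(counter);
			counter->id = 0;
		}
		return err;
	}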
index 753faa9ad06a8876d4f7c848823825d55df9a43d,2080458cabd1ca02a417a7e675d6d99ab2cceb23..b7c8c926c5787028f97fff37eadbf728aa83497a
@@@ -2023,8 -2031,7 +2032,9 @@@ static int mlx5_revoke_mr(struct mlx5_i
        struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
        struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
        bool is_odp = is_odp_mr(mr);
 +      bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
 +                      !to_ib_umem_dmabuf(mr->umem)->pinned;
+       bool from_cache = !!ent;
        int ret = 0;
  
        if (is_odp)
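
Both flags above are latched before any teardown runs; an assumed rationale,
sketched with hypothetical helpers:

	/* Illustration only: later revoke steps can clear
	 * mr->mmkey.cache_ent, so whether the mkey should return to the
	 * cache must be captured before teardown. */
	bool from_cache = !!mr->mmkey.cache_ent;

	revoke_mkey(mr);			/* hypothetical */
	if (from_cache)
		push_mkey_to_cache(mr);		/* hypothetical */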
Simple merge
index e27478fe9456c9417dcadc45013f4dc884f6d744,4e56a371deb5ffd812c848ac73c2a4735f9ef2de..b248c68bf9b1f0110eaeb255bf1d382fee336527
@@@ -72,10 -71,45 +69,39 @@@ static void rxe_init_device_param(struc
        rxe->attr.max_pkeys                     = RXE_MAX_PKEYS;
        rxe->attr.local_ca_ack_delay            = RXE_LOCAL_CA_ACK_DELAY;
  
 -      ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
 -      if (!ndev)
 -              return;
 -
+       if (ndev->addr_len) {
+               memcpy(rxe->raw_gid, ndev->dev_addr,
+                       min_t(unsigned int, ndev->addr_len, ETH_ALEN));
+       } else {
+               /*
+                * This device does not have a HW address, but
+                * connection management requires a unique gid.
+                */
+               eth_random_addr(rxe->raw_gid);
+       }
        addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
-                       ndev->dev_addr);
+                       rxe->raw_gid);
  
 -      dev_put(ndev);
 -
        rxe->max_ucontext                       = RXE_MAX_UCONTEXT;
+       if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+               rxe->attr.kernel_cap_flags |= IBK_ON_DEMAND_PAGING;
+               /* IB_ODP_SUPPORT_IMPLICIT is not supported right now. */
+               rxe->attr.odp_caps.general_caps |= IB_ODP_SUPPORT;
+               rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
+               rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_RECV;
+               rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+               rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
+               rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
+               rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
+               rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
+               rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
+               rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+       }
  }
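
A consumer-side counterpart to the capability bits set above; the helper is
hypothetical, the field names come from struct ib_device_attr:

	/* Check RC ODP write support before relying on rxe's new ODP
	 * path; dev->attrs is the standard ib_device attribute cache. */
	static bool rc_odp_write_supported(struct ib_device *dev)
	{
		return (dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING) &&
		       (dev->attrs.odp_caps.per_transport_caps.rc_odp_caps &
			IB_ODP_SUPPORT_WRITE);
	}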
  
  /* initialize port attributes */
@@@ -107,13 -141,18 +133,13 @@@ static void rxe_init_port_param(struct 
  /* initialize port state, note IB convention that HCA ports are always
   * numbered from 1
   */
 -static void rxe_init_ports(struct rxe_dev *rxe)
 +static void rxe_init_ports(struct rxe_dev *rxe, struct net_device *ndev)
  {
        struct rxe_port *port = &rxe->port;
 -      struct net_device *ndev;
  
        rxe_init_port_param(port);
 -      ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
 -      if (!ndev)
 -              return;
        addrconf_addr_eui48((unsigned char *)&port->port_guid,
-                           ndev->dev_addr);
+                           rxe->raw_gid);
 -      dev_put(ndev);
        spin_lock_init(&port->port_lock);
  }
  
Simple merge
index e190d5ee51544090a703c14c76abdc23797f864a,4e870b11f946676445a349c412f35725555db5bb..1423df8531f76ea4f8ddd0f353bccfbd10325c9e
        gd->driver_data = NULL;
        gd->gdma_context = NULL;
        kfree(ac);
 +      dev_dbg(dev, "%s succeeded\n", __func__);
  }
  
- struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index)
+ struct net_device *mana_get_primary_netdev(struct mana_context *ac,
+                                          u32 port_index,
+                                          netdevice_tracker *tracker)
  {
        struct net_device *ndev;
  
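The netdevice_tracker parameter implies callers now take a tracked reference
instead of working under an RCU read lock; a sketch of the assumed caller
pattern (the error convention here is illustrative):

	static int use_primary_netdev(struct mana_context *ac, u32 port_index)
	{
		netdevice_tracker tracker;
		struct net_device *ndev;

		ndev = mana_get_primary_netdev(ac, port_index, &tracker);
		if (IS_ERR_OR_NULL(ndev))	/* exact convention may differ */
			return -ENODEV;

		/* ... use ndev outside any RCU read-side section ... */

		netdev_put(ndev, &tracker);	/* pairs with the tracked hold */
		return 0;
	}
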
Simple merge
Simple merge