Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
author Stephen Rothwell <sfr@canb.auug.org.au>
Mon, 17 Mar 2025 01:52:29 +0000 (12:52 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Mon, 17 Mar 2025 01:52:29 +0000 (12:52 +1100)
# Conflicts:
# drivers/infiniband/sw/rxe/rxe.c

drivers/infiniband/hw/bnxt_re/bnxt_re.h
drivers/infiniband/hw/mana/main.c
drivers/infiniband/sw/rxe/rxe.c
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
drivers/net/ethernet/microsoft/mana/gdma_main.c

Simple merge
index e27478fe9456c9417dcadc45013f4dc884f6d744,4e56a371deb5ffd812c848ac73c2a4735f9ef2de..c83e2cf8274814439f01569988867ffae743fff8
@@@ -72,10 -71,45 +69,39 @@@ static void rxe_init_device_param(struc
        rxe->attr.max_pkeys                     = RXE_MAX_PKEYS;
        rxe->attr.local_ca_ack_delay            = RXE_LOCAL_CA_ACK_DELAY;
  
 -      ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
 -      if (!ndev)
 -              return;
 -
+       if (ndev->addr_len) {
+               memcpy(rxe->raw_gid, ndev->dev_addr,
+                       min_t(unsigned int, ndev->addr_len, ETH_ALEN));
+       } else {
+               /*
+                * This device does not have a HW address, but
+                * connection management requires a unique gid.
+                */
+               eth_random_addr(rxe->raw_gid);
+       }
        addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
-                       ndev->dev_addr);
+                       rxe->raw_gid);
  
 -      dev_put(ndev);
 -
        rxe->max_ucontext                       = RXE_MAX_UCONTEXT;
+       if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+               rxe->attr.kernel_cap_flags |= IBK_ON_DEMAND_PAGING;
+               /* IB_ODP_SUPPORT_IMPLICIT is not supported right now. */
+               rxe->attr.odp_caps.general_caps |= IB_ODP_SUPPORT;
+               rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
+               rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_RECV;
+               rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+               rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
+               rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
+               rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
+               rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
+               rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
+               rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+       }
  }
  
  /* initialize port attributes */
@@@ -107,13 -141,18 +133,13 @@@ static void rxe_init_port_param(struct 
  /* initialize port state, note IB convention that HCA ports are always
   * numbered from 1
   */
- static void rxe_init_ports(struct rxe_dev *rxe, struct net_device *ndev)
+ static void rxe_init_ports(struct rxe_dev *rxe)
  {
        struct rxe_port *port = &rxe->port;
 -      struct net_device *ndev;
  
        rxe_init_port_param(port);
 -      ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
 -      if (!ndev)
 -              return;
        addrconf_addr_eui48((unsigned char *)&port->port_guid,
-                           ndev->dev_addr);
+                           rxe->raw_gid);
 -      dev_put(ndev);
        spin_lock_init(&port->port_lock);
  }
  
@@@ -131,12 -170,12 +157,12 @@@ static void rxe_init_pools(struct rxe_d
  }
  
  /* initialize rxe device state */
 -static void rxe_init(struct rxe_dev *rxe)
 +static void rxe_init(struct rxe_dev *rxe, struct net_device *ndev)
  {
        /* init default device parameters */
 -      rxe_init_device_param(rxe);
 +      rxe_init_device_param(rxe, ndev);
  
-       rxe_init_ports(rxe, ndev);
+       rxe_init_ports(rxe);
        rxe_init_pools(rxe);
  
        /* init pending mmap list */