mutex_init(&scif_info.eplock);
spin_lock_init(&scif_info.rmalock);
spin_lock_init(&scif_info.nb_connect_lock);
- spin_lock_init(&scif_info.port_lock);
mutex_init(&scif_info.conflock);
mutex_init(&scif_info.connlock);
mutex_init(&scif_info.fencelock);
INIT_WORK(&scif_info.misc_work, scif_misc_handler);
INIT_WORK(&scif_info.mmu_notif_work, scif_mmu_notif_handler);
INIT_WORK(&scif_info.conn_work, scif_conn_handler);
- idr_init(&scif_ports);
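+ /* scif_ports is statically initialized via DEFINE_XARRAY_ALLOC */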
return 0;
free_sdev:
scif_destroy_scifdev();
static void _scif_exit(void)
{
- idr_destroy(&scif_ports);
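+ /* No xa_destroy() needed: scif_ports is expected to be empty by now */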
kmem_cache_destroy(unaligned_cache);
scif_destroy_scifdev();
}
* @eplock: Lock to synchronize listening, zombie endpoint lists
* @connlock: Lock to synchronize connected and disconnected lists
* @nb_connect_lock: Synchronize non blocking connect operations
- * @port_lock: Synchronize access to SCIF ports
* @uaccept: List of user acceptreq waiting for acceptreg
* @listen: List of listening end points
* @zombie: List of zombie end points with pending RMA's
struct mutex eplock;
struct mutex connlock;
spinlock_t nb_connect_lock;
- spinlock_t port_lock;
struct list_head uaccept;
struct list_head listen;
struct list_head zombie;
extern bool scif_reg_cache_enable;
extern bool scif_ulimit_check;
extern struct scif_info scif_info;
-extern struct idr scif_ports;
extern struct bus_type scif_peer_bus;
extern struct scif_dev *scif_dev;
extern const struct file_operations scif_fops;
*
* Intel SCIF driver.
*/
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include "scif_main.h"
#define SCIF_PORT_COUNT 0x10000 /* Ports available */
-struct idr scif_ports;
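+/*
+ * Allocation-tracking XArray of reserved ports. DEFINE_XARRAY_ALLOC
+ * sets XA_FLAGS_ALLOC so xa_alloc() can hand out free ids, and the
+ * array's internal lock replaces scif_info.port_lock.
+ */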
+static DEFINE_XARRAY_ALLOC(scif_ports);
/*
* struct scif_port - SCIF port information
* to the global list.
- * @port : port # to be reserved.
+ * @start: lowest port # that may be reserved (inclusive).
+ * @end: highest port # that may be reserved (inclusive).
*
- * @return : Allocated SCIF port #, or -ENOSPC if port unavailable.
+ * Return: Allocated SCIF port #, or -EBUSY if port unavailable.
* On memory allocation failure, returns -ENOMEM.
*/
static int __scif_get_port(int start, int end)
{
- int id;
+ int err;
+ u32 id;
struct scif_port *port = kzalloc(sizeof(*port), GFP_ATOMIC);
if (!port)
return -ENOMEM;
- spin_lock(&scif_info.port_lock);
- id = idr_alloc(&scif_ports, port, start, end, GFP_ATOMIC);
- if (id >= 0)
- port->ref_cnt++;
- spin_unlock(&scif_info.port_lock);
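+ /*
+  * XA_LIMIT() bounds are inclusive of both ends, unlike idr_alloc()'s
+  * exclusive 'end'. xa_alloc() takes the XArray's internal lock, and
+  * the fresh id is assumed not to be looked up by anyone else before
+  * this function returns, so ref_cnt is bumped without holding it.
+  */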
+ err = xa_alloc(&scif_ports, &id, port, XA_LIMIT(start, end),
+ GFP_ATOMIC);
+ if (err < 0) {
+ kfree(port);
+ return err;
+ }
+ port->ref_cnt++;
return id;
}
* scif_rsrv_port - Reserve a specified port # for SCIF.
* @port : port # to be reserved.
*
- * @return : Allocated SCIF port #, or -ENOSPC if port unavailable.
+ * Return: Allocated SCIF port #, or -EBUSY if port unavailable.
* On memory allocation failure, returns -ENOMEM.
*/
int scif_rsrv_port(u16 port)
{
- return __scif_get_port(port, port + 1);
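+ /* Inclusive XA_LIMIT: [port, port] targets exactly this port */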
+ return __scif_get_port(port, port);
}
/**
* scif_get_new_port - Get and reserve any port # for SCIF in the range
* SCIF_PORT_RSVD + 1 to SCIF_PORT_COUNT - 1.
*
- * @return : Allocated SCIF port #, or -ENOSPC if no ports available.
+ * Return: Allocated SCIF port #, or -EBUSY if no ports available.
* On memory allocation failure, returns -ENOMEM.
*/
int scif_get_new_port(void)
{
- return __scif_get_port(SCIF_PORT_RSVD + 1, SCIF_PORT_COUNT);
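+ /* Inclusive upper bound: the highest allocatable port is SCIF_PORT_COUNT - 1 */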
+ return __scif_get_port(SCIF_PORT_RSVD + 1, SCIF_PORT_COUNT - 1);
}
/**
if (!id)
return;
- spin_lock(&scif_info.port_lock);
- port = idr_find(&scif_ports, id);
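+ /*
+  * xa_lock() takes the XArray's internal spinlock, which now
+  * serializes ref_cnt updates in place of port_lock.
+  */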
+ xa_lock(&scif_ports);
+ port = xa_load(&scif_ports, id);
if (port)
port->ref_cnt++;
- spin_unlock(&scif_info.port_lock);
+ xa_unlock(&scif_ports);
}
/**
if (!id)
return;
- spin_lock(&scif_info.port_lock);
- port = idr_find(&scif_ports, id);
+ xa_lock(&scif_ports);
+ port = xa_load(&scif_ports, id);
if (port) {
port->ref_cnt--;
if (!port->ref_cnt) {
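+ /* Last reference gone: remove the entry and free the port */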
- idr_remove(&scif_ports, id);
+ xa_erase(&scif_ports, id);
kfree(port);
}
}
- spin_unlock(&scif_info.port_lock);
+ xa_unlock(&scif_ports);
}