#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
-#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/xarray.h>
#include "core.h"
u32 version;
struct fw_device *device;
- spinlock_t lock;
bool in_shutdown;
- struct idr resource_idr;
+ struct xarray resources;
struct list_head event_list;
wait_queue_head_t wait;
wait_queue_head_t tx_flush_wait;
struct iso_resource {
struct client_resource resource;
struct client *client;
- /* Schedule work and access todo only with client->lock held. */
+ /* Schedule work and access todo only with client->resources lock held. */
struct delayed_work work;
enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
}
client->device = device;
- spin_lock_init(&client->lock);
- idr_init(&client->resource_idr);
+ xa_init_flags(&client->resources, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
INIT_LIST_HEAD(&client->event_list);
init_waitqueue_head(&client->wait);
init_waitqueue_head(&client->tx_flush_wait);
event->v[1].data = data1;
event->v[1].size = size1;
- spin_lock_irqsave(&client->lock, flags);
+ xa_lock_irqsave(&client->resources, flags);
if (client->in_shutdown)
kfree(event);
else
list_add_tail(&event->link, &client->event_list);
- spin_unlock_irqrestore(&client->lock, flags);
+ xa_unlock_irqrestore(&client->resources, flags);
wake_up_interruptible(&client->wait);
}
fw_device_is_shutdown(client->device))
return -ENODEV;
- spin_lock_irq(&client->lock);
+ xa_lock_irq(&client->resources);
event = list_first_entry(&client->event_list, struct event, link);
list_del(&event->link);
- spin_unlock_irq(&client->lock);
+ xa_unlock_irq(&client->resources);
total = 0;
for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
mutex_unlock(&device->client_list_mutex);
}
-static int schedule_reallocations(int id, void *p, void *data)
-{
- schedule_if_iso_resource(p);
-
- return 0;
-}
-
static void queue_bus_reset_event(struct client *client)
{
struct bus_reset_event *e;
+ struct client_resource *res;
+ unsigned long index;
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL)
queue_event(client, &e->event,
&e->reset, sizeof(e->reset), NULL, 0);
- spin_lock_irq(&client->lock);
- idr_for_each(&client->resource_idr, schedule_reallocations, client);
- spin_unlock_irq(&client->lock);
+ xa_lock_irq(&client->resources);
+ xa_for_each(&client->resources, index, res) {
+ schedule_if_iso_resource(res);
+ }
+ xa_unlock_irq(&client->resources);
}
void fw_device_cdev_update(struct fw_device *device)
static int add_client_resource(struct client *client,
struct client_resource *resource, gfp_t gfp_mask)
{
- bool preload = gfpflags_allow_blocking(gfp_mask);
unsigned long flags;
int ret;
- if (preload)
- idr_preload(gfp_mask);
- spin_lock_irqsave(&client->lock, flags);
+ xa_lock_irqsave(&client->resources, flags);
if (client->in_shutdown)
ret = -ECANCELED;
else
- ret = idr_alloc(&client->resource_idr, resource, 0, 0,
- GFP_NOWAIT);
+ ret = __xa_alloc(&client->resources, &resource->handle,
+ resource, xa_limit_31b, gfp_mask);
if (ret >= 0) {
- resource->handle = ret;
client_get(client);
schedule_if_iso_resource(resource);
}
- spin_unlock_irqrestore(&client->lock, flags);
- if (preload)
- idr_preload_end();
+ xa_unlock_irqrestore(&client->resources, flags);
- return ret < 0 ? ret : 0;
+ return ret;
}
static int release_client_resource(struct client *client, u32 handle,
{
struct client_resource *resource;
- spin_lock_irq(&client->lock);
+ xa_lock_irq(&client->resources);
if (client->in_shutdown)
resource = NULL;
else
- resource = idr_find(&client->resource_idr, handle);
+ resource = xa_load(&client->resources, handle);
if (resource && resource->release == release)
- idr_remove(&client->resource_idr, handle);
- spin_unlock_irq(&client->lock);
+ xa_erase(&client->resources, handle);
+ xa_unlock_irq(&client->resources);
if (!(resource && resource->release == release))
return -EINVAL;
if (rcode == RCODE_COMPLETE)
memcpy(rsp->data, payload, rsp->length);
- spin_lock_irqsave(&client->lock, flags);
- idr_remove(&client->resource_idr, e->r.resource.handle);
+ xa_lock_irqsave(&client->resources, flags);
+ __xa_erase(&client->resources, e->r.resource.handle);
if (client->in_shutdown)
wake_up(&client->tx_flush_wait);
- spin_unlock_irqrestore(&client->lock, flags);
+ xa_unlock_irqrestore(&client->resources, flags);
rsp->type = FW_CDEV_EVENT_RESPONSE;
rsp->rcode = rcode;
queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
NULL, 0);
- /* Drop the idr's reference */
+ /* Drop the resource's reference */
client_put(client);
}
context->drop_overflow_headers = true;
/* We only support one context at this time. */
- spin_lock_irq(&client->lock);
+ xa_lock_irq(&client->resources);
if (client->iso_context != NULL) {
- spin_unlock_irq(&client->lock);
+ xa_unlock_irq(&client->resources);
fw_iso_context_destroy(context);
return -EBUSY;
client->device->card,
iso_dma_direction(context));
if (ret < 0) {
- spin_unlock_irq(&client->lock);
+ xa_unlock_irq(&client->resources);
fw_iso_context_destroy(context);
return ret;
}
client->iso_closure = a->closure;
client->iso_context = context;
- spin_unlock_irq(&client->lock);
+ xa_unlock_irq(&client->resources);
a->handle = 0;
int generation, channel, bandwidth, todo;
bool skip, free, success;
- spin_lock_irq(&client->lock);
+ xa_lock_irq(&client->resources);
generation = client->device->generation;
todo = r->todo;
/* Allow 1000ms grace period for other reallocations. */
todo == ISO_RES_ALLOC_ONCE ||
todo == ISO_RES_DEALLOC_ONCE;
r->generation = generation;
- spin_unlock_irq(&client->lock);
+ xa_unlock_irq(&client->resources);
if (skip)
goto out;
todo == ISO_RES_REALLOC ||
todo == ISO_RES_ALLOC_ONCE);
/*
- * Is this generation outdated already? As long as this resource sticks
- * in the idr, it will be scheduled again for a newer generation or at
- * shutdown.
+ * Is this generation outdated already? As long as this resource
+ * sticks to the client, it will be scheduled again for a newer
+ * generation or at shutdown.
*/
if (channel == -EAGAIN &&
(todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
success = channel >= 0 || bandwidth > 0;
- spin_lock_irq(&client->lock);
+ xa_lock_irq(&client->resources);
/*
* Transit from allocation to reallocation, except if the client
* requested deallocation in the meantime.
r->todo = ISO_RES_REALLOC;
/*
* Allocation or reallocation failure? Pull this resource out of the
- * idr and prepare for deletion, unless the client is shutting down.
+ * client and prepare for deletion, unless the client is shutting down.
*/
if (r->todo == ISO_RES_REALLOC && !success &&
!client->in_shutdown &&
- idr_remove(&client->resource_idr, r->resource.handle)) {
+ __xa_erase(&client->resources, r->resource.handle)) {
client_put(client);
free = true;
}
- spin_unlock_irq(&client->lock);
+ xa_unlock_irq(&client->resources);
if (todo == ISO_RES_ALLOC && channel >= 0)
r->channels = 1ULL << channel;
struct iso_resource *r =
container_of(resource, struct iso_resource, resource);
- spin_lock_irq(&client->lock);
+ xa_lock_irq(&client->resources);
r->todo = ISO_RES_DEALLOC;
schedule_iso_resource(r, 0);
- spin_unlock_irq(&client->lock);
+ xa_unlock_irq(&client->resources);
}
static int init_iso_resource(struct client *client,
if (ret < 0)
return ret;
- spin_lock_irq(&client->lock);
+ xa_lock_irq(&client->resources);
if (client->iso_context) {
ret = fw_iso_buffer_map_dma(&client->buffer,
client->device->card,
iso_dma_direction(client->iso_context));
client->buffer_is_mapped = (ret == 0);
}
- spin_unlock_irq(&client->lock);
+ xa_unlock_irq(&client->resources);
if (ret < 0)
goto fail;
return ret;
}
-static int is_outbound_transaction_resource(int id, void *p, void *data)
-{
- struct client_resource *resource = p;
-
- return resource->release == release_transaction;
-}
-
static int has_outbound_transactions(struct client *client)
{
- int ret;
-
- spin_lock_irq(&client->lock);
- ret = idr_for_each(&client->resource_idr,
- is_outbound_transaction_resource, NULL);
- spin_unlock_irq(&client->lock);
-
- return ret;
-}
-
-static int shutdown_resource(int id, void *p, void *data)
-{
- struct client_resource *resource = p;
- struct client *client = data;
-
- resource->release(client, resource);
- client_put(client);
+ struct client_resource *resource;
+ unsigned long index;
+ xa_for_each(&client->resources, index, resource) {
+ if (resource->release == release_transaction)
+ return 1;
+ }
return 0;
}
static int fw_device_op_release(struct inode *inode, struct file *file)
{
struct client *client = file->private_data;
+ struct client_resource *resource;
+ unsigned long index;
struct event *event, *next_event;
spin_lock_irq(&client->device->card->lock);
if (client->buffer.pages)
fw_iso_buffer_destroy(&client->buffer, client->device->card);
- /* Freeze client->resource_idr and client->event_list */
- spin_lock_irq(&client->lock);
+ /* Freeze client->resources and client->event_list */
+ xa_lock_irq(&client->resources);
client->in_shutdown = true;
- spin_unlock_irq(&client->lock);
+ xa_unlock_irq(&client->resources);
wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
- idr_for_each(&client->resource_idr, shutdown_resource, client);
- idr_destroy(&client->resource_idr);
+ xa_for_each(&client->resources, index, resource) {
+ resource->release(client, resource);
+ client_put(client);
+ xa_erase(&client->resources, index);
+ }
list_for_each_entry_safe(event, next_event, &client->event_list, link)
kfree(event);