/* used by in-kernel data structures */
struct kern_ipc_perm
{
- spinlock_t lock;
+ rwlock_t lock;
int deleted;
int id;
key_t key;
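
The hunk above swaps the single spinlock in kern_ipc_perm for an rwlock, so paths that only inspect an IPC object can share the lock while destructive paths keep exclusive access. A minimal kernel-style sketch of the two modes, using only the stock rwlock API (the example names are invented, not part of the patch):

/* sketch: readers share the lock, writers exclude readers and writers */
#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);
static int example_state;

static int example_reader(void)
{
	int v;

	read_lock(&example_lock);	/* may run concurrently with other readers */
	v = example_state;
	read_unlock(&example_lock);
	return v;
}

static void example_writer(int v)
{
	write_lock(&example_lock);	/* exclusive */
	example_state = v;
	write_unlock(&example_lock);
}
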
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
+#define sem_read_unlock(sma) ipc_read_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
return container_of(ipcp, struct sem_array, sem_perm);
}
+static inline struct sem_array *sem_read_lock_check(struct ipc_namespace *ns,
+ int id)
+{
+ struct kern_ipc_perm *ipcp = ipc_read_lock_check(&sem_ids(ns), id);
+
+ if (IS_ERR(ipcp))
+ return (struct sem_array *)ipcp;
+
+ return container_of(ipcp, struct sem_array, sem_perm);
+}
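
A minimal sketch of how a read-only caller is expected to use the new helper pair (illustrative only, not taken from the patch; sem_nsems is read merely as an example field):

/* hypothetical read-only lookup built on the new read-lock helpers */
static int example_get_nsems(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	int nsems;

	sma = sem_read_lock_check(ns, semid);	/* RCU + read_lock + id check */
	if (IS_ERR(sma))
		return PTR_ERR(sma);

	nsems = sma->sem_nsems;			/* read-only access */
	sem_read_unlock(sma);

	return nsems;
}
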
+
static inline void sem_lock_and_putref(struct sem_array *sma)
{
ipc_lock_by_ptr(&sma->sem_perm);
ipc_unlock(&(sma)->sem_perm);
}
+static inline void sem_getref_and_read_unlock(struct sem_array *sma)
+{
+ ipc_rcu_getref(sma);
+ ipc_read_unlock(&(sma)->sem_perm);
+}
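
The getref/putref helpers support the usual "pin, drop the lock, sleep, relock" dance; a sketch of that pattern with the new read-side variant (illustrative, not from the patch; after relocking, ->deleted must be rechecked because the set may have been removed while unlocked):

static int example_sleep_then_relock(struct sem_array *sma)
{
	sem_getref_and_read_unlock(sma);	/* pin the array, drop the read lock */

	/* ... work that may sleep, e.g. a GFP_KERNEL allocation ... */

	sem_lock_and_putref(sma);		/* re-take the lock, drop the pin */
	if (sma->sem_perm.deleted) {
		sem_unlock(sma);
		return -EIDRM;			/* the set went away meanwhile */
	}

	/* ... continue with the lock held ... */
	sem_unlock(sma);
	return 0;
}
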
+
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ rcu_read_unlock();
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
int i;
/* Free the existing undo structures for this semaphore set. */
- assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
list_del(&un->list_id);
spin_lock(&un->ulp->lock);
spin_unlock(&curr->lock);
}
- assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id) {
for (i = 0; i < nsems; i++)
un->semadj[i] = 0;
if (val > SEMVMX || val < 0)
goto out_unlock;
- assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
new->semid = semid;
assert_spin_locked(&ulp->lock);
list_add_rcu(&new->list_proc, &ulp->list_proc);
- assert_spin_locked(&sma->sem_perm.lock);
list_add(&new->list_id, &sma->list_id);
un = new;
INIT_LIST_HEAD(&tasks);
- sma = sem_lock_check(ns, semid);
+ sma = sem_read_lock_check(ns, semid);
if (IS_ERR(sma)) {
if (un)
rcu_read_unlock();
* per-semaphore locks instead.
*/
if (!un)
- sem_getref_and_unlock(sma);
+ sem_getref_and_read_unlock(sma);
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current),
&pending, &blocker);
spin_unlock(&blocker->lock);
if (un)
- sem_getref_and_unlock(sma);
+ sem_getref_and_read_unlock(sma);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
goto out_wakeup;
out_unlock_free:
- sem_unlock(sma);
+ sem_read_unlock(sma);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
}
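
For context, the workload this read-lock fast path targets is many tasks issuing semop()/semtimedop() on the same semaphore set at once; with the rwlock conversion those callers can hold the set's lock in parallel. A small runnable userspace reproducer (not part of the patch):

#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	struct sembuf up   = { .sem_num = 0, .sem_op = 1 };
	struct sembuf down = { .sem_num = 0, .sem_op = -1 };

	if (semid < 0)
		return 1;

	for (int i = 0; i < 4; i++) {
		if (fork() == 0) {		/* children hammer the same set */
			for (int j = 0; j < 100000; j++) {
				semop(semid, &up, 1);
				semop(semid, &down, 1);
			}
			_exit(0);
		}
	}
	while (wait(NULL) > 0)
		;
	semctl(semid, 0, IPC_RMID);
	return 0;
}
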
/* remove un from the linked lists */
- assert_spin_locked(&sma->sem_perm.lock);
list_del(&un->list_id);
spin_lock(&ulp->lock);
if (ids->in_use >= size)
return -ENOSPC;
- spin_lock_init(&new->lock);
+ rwlock_init(&new->lock);
new->deleted = 0;
rcu_read_lock();
- spin_lock(&new->lock);
+ write_lock(&new->lock);
err = idr_get_new(&ids->ipcs_idr, new, &id);
if (err) {
- spin_unlock(&new->lock);
+ write_unlock(&new->lock);
rcu_read_unlock();
return err;
}
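
Assuming ipc_addid() keeps its existing convention of returning with the new object's lock held (now write-held) and the RCU read-side section still open, a creator looks roughly like the sketch below (illustrative only; the function name and limit argument are placeholders):

static int example_create(struct ipc_ids *ids, struct kern_ipc_perm *new, int limit)
{
	int id;

	id = ipc_addid(ids, new, limit);	/* returns write-locked on success */
	if (id < 0)
		return id;			/* e.g. -ENOSPC; nothing to unlock */

	/*
	 * Concurrent lookups that already found the entry in the IDR
	 * block in read_lock()/write_lock() until we finish setting up.
	 */
	/* ... initialise the containing object ... */

	ipc_unlock(new);			/* write_unlock() + rcu_read_unlock() */
	return id;
}
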
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
if (out) {
out += HDRLEN_VMALLOC;
container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
}
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
if (out) {
out += HDRLEN_KMALLOC;
container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
}
}
void ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ atomic_inc(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
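
The refcount conversion above is the standard atomic pattern: initialise to 1, atomic_inc() to take a reference, atomic_dec_and_test() to learn whether the last reference just went away, so no lock is needed around the count. A runnable userspace analogue using C11 <stdatomic.h> (names invented for illustration):

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
	int payload;
};

static struct obj *obj_alloc(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (o)
		atomic_init(&o->refcount, 1);	/* like atomic_set(..., 1) */
	return o;
}

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcount, 1);	/* like atomic_inc() */
}

static void obj_put(struct obj *o)
{
	/* like atomic_dec_and_test(): free only when the count reaches zero */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = obj_alloc();

	if (!o)
		return 1;
	obj_get(o);	/* 1 -> 2 */
	obj_put(o);	/* 2 -> 1, not freed */
	obj_put(o);	/* 1 -> 0, freed */
	return 0;
}
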
return ERR_PTR(-EINVAL);
}
- spin_lock(&out->lock);
+ write_lock(&out->lock);
/* ipc_rmid() may have already freed the ID while ipc_lock
* was spinning: here verify that the structure is still valid
*/
if (out->deleted) {
- spin_unlock(&out->lock);
+ write_unlock(&out->lock);
+ rcu_read_unlock();
+ return ERR_PTR(-EINVAL);
+ }
+
+ return out;
+}
+
+struct kern_ipc_perm *ipc_read_lock(struct ipc_ids *ids, int id)
+{
+ struct kern_ipc_perm *out;
+ int lid = ipcid_to_idx(id);
+
+ rcu_read_lock();
+ out = idr_find(&ids->ipcs_idr, lid);
+ if (out == NULL) {
+ rcu_read_unlock();
+ return ERR_PTR(-EINVAL);
+ }
+
+ read_lock(&out->lock);
+
+ /* ipc_rmid() may have already freed the ID while ipc_read_lock()
+ * was spinning: here verify that the structure is still valid
+ */
+ if (out->deleted) {
+ read_unlock(&out->lock);
rcu_read_unlock();
return ERR_PTR(-EINVAL);
}
return out;
}
+struct kern_ipc_perm *ipc_read_lock_check(struct ipc_ids *ids, int id)
+{
+ struct kern_ipc_perm *out;
+
+ out = ipc_read_lock(ids, id);
+ if (IS_ERR(out))
+ return out;
+
+ if (ipc_checkid(out, id)) {
+ ipc_read_unlock(out);
+ return ERR_PTR(-EIDRM);
+ }
+
+ return out;
+}
+
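ipc_read_lock_check() relates to ipc_read_lock() the same way ipc_lock_check() relates to ipc_lock(): it additionally verifies the sequence number embedded in the id, so a stale id whose IDR slot has been reused fails with -EIDRM instead of locking the wrong object. A hypothetical caller:

static int example_lookup(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *perm;

	perm = ipc_read_lock_check(ids, id);
	if (IS_ERR(perm))
		return PTR_ERR(perm);	/* -EINVAL (gone) or -EIDRM (stale id) */

	/* ... read-only access to *perm ... */

	ipc_read_unlock(perm);
	return 0;
}
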
/**
* ipcget - Common sys_*get() code
* @ns : namespace
static inline void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
{
rcu_read_lock();
- spin_lock(&perm->lock);
+ write_lock(&perm->lock);
}
static inline void ipc_unlock(struct kern_ipc_perm *perm)
{
- spin_unlock(&perm->lock);
+ write_unlock(&perm->lock);
+ rcu_read_unlock();
+}
+
+static inline void ipc_read_unlock(struct kern_ipc_perm *perm)
+{
+ read_unlock(&perm->lock);
rcu_read_unlock();
}
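
Note the pairing these helpers impose: each unlock helper also leaves the RCU read-side section that the matching lock helper (or ipc_lock()/ipc_read_lock()) entered. A write-side sketch for contrast with the read-side example above (illustrative only):

static void example_write_side(struct kern_ipc_perm *perm)
{
	ipc_lock_by_ptr(perm);		/* rcu_read_lock() + write_lock() */
	/* ... modify the object ... */
	ipc_unlock(perm);		/* write_unlock() + rcu_read_unlock() */
}
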
struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id);
+struct kern_ipc_perm *ipc_read_lock_check(struct ipc_ids *ids, int id);
int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
struct ipc_ops *ops, struct ipc_params *params);
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,