info->version[2] = DLM_PLOCK_VERSION_PATCH;
 }
 
+static struct plock_op *plock_lookup_waiter(const struct dlm_plock_info *info)
+{
+       struct plock_op *op = NULL, *iter;
+
+       list_for_each_entry(iter, &recv_list, list) {
+               if (iter->info.fsid == info->fsid &&
+                   iter->info.number == info->number &&
+                   iter->info.owner == info->owner &&
+                   iter->info.pid == info->pid &&
+                   iter->info.start == info->start &&
+                   iter->info.end == info->end &&
+                   iter->info.ex == info->ex &&
+                   iter->info.wait) {
+                       op = iter;
+                       break;
+               }
+       }
+
+       return op;
+}
+
 static int check_version(struct dlm_plock_info *info)
 {
        if ((DLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
 }
 EXPORT_SYMBOL_GPL(dlm_posix_unlock);
 
+/*
+ * NOTE: This implementation can only handle async lock requests as nfs
+ * does them. It cannot handle cancellation of a pending lock request
+ * sitting in wait_event(), but for now nfs is the only local kernel
+ * user.
+ */
+int dlm_posix_cancel(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+                    struct file_lock *fl)
+{
+       struct dlm_plock_info info;
+       struct plock_op *op;
+       struct dlm_ls *ls;
+       int rv;
+
+       /* this only works for async requests and nfs is the only
+        * kernel user right now.
+        */
+       if (WARN_ON_ONCE(!fl->fl_lmops || !fl->fl_lmops->lm_grant))
+               return -EOPNOTSUPP;
+
+       ls = dlm_find_lockspace_local(lockspace);
+       if (!ls)
+               return -EINVAL;
+
+       memset(&info, 0, sizeof(info));
+       info.pid = fl->fl_pid;
+       info.ex = (fl->fl_type == F_WRLCK);
+       info.fsid = ls->ls_global_id;
+       dlm_put_lockspace(ls);
+       info.number = number;
+       info.start = fl->fl_start;
+       info.end = fl->fl_end;
+       info.owner = (__u64)fl->fl_pid;
+
+       rv = do_lock_cancel(&info);
+       switch (rv) {
+       case 0:
+               spin_lock(&ops_lock);
+               /* lock request to cancel must be on recv_list because
+                * do_lock_cancel() synchronizes it.
+                */
+               op = plock_lookup_waiter(&info);
+               if (WARN_ON_ONCE(!op)) {
+                       spin_unlock(&ops_lock);
+                       rv = -ENOLCK;
+                       break;
+               }
+
+               list_del(&op->list);
+               spin_unlock(&ops_lock);
+               WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK);
+               op->data->callback(op->data->fl, -EINTR);
+               dlm_release_plock_op(op);
+               rv = -EINTR;
+               break;
+       case -ENOENT:
+               /* if cancel wasn't successful we probably were too late
+                * or it was a non-blocking lock request, so just unlock it.
+                */
+               rv = dlm_posix_unlock(lockspace, number, file, fl);
+               break;
+       default:
+               break;
+       }
+
+       return rv;
+}
+EXPORT_SYMBOL_GPL(dlm_posix_cancel);
+
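
For reference, here is a minimal sketch of the caller side this function expects: a lockd-style kernel user that issues the plock asynchronously via lm_grant and later cancels it through ->lock(F_CANCELLK). Only vfs_lock_file(), vfs_cancel_lock(), FL_SLEEP, FILE_LOCK_DEFERRED and lm_grant are real kernel interfaces here; the example_* names are illustrative assumptions and not part of this patch.

#include <linux/fs.h>
#include <linux/filelock.h>

/* invoked by the plock completion path when the request is decided, or
 * with -EINTR when dlm_posix_cancel() wins the race */
static void example_grant(struct file_lock *fl, int result)
{
}

static const struct lock_manager_operations example_lm_ops = {
	.lm_grant	= example_grant,
};

static int example_async_lock_then_cancel(struct file *file,
					  struct file_lock *fl)
{
	int err;

	fl->fl_flags |= FL_SLEEP;	/* blocking request ... */
	fl->fl_lmops = &example_lm_ops;	/* ... completed asynchronously */

	/* reaches the filesystem's ->lock() (e.g. gfs2_lock()) and from
	 * there dlm_posix_lock(); a deferred request returns
	 * FILE_LOCK_DEFERRED instead of sleeping in wait_event() */
	err = vfs_lock_file(file, F_SETLK, fl, NULL);
	if (err != FILE_LOCK_DEFERRED)
		return err;

	/* the caller changed its mind: F_CANCELLK now ends up in
	 * dlm_posix_cancel() rather than being rewritten into an unlock */
	return vfs_cancel_lock(file, fl);
}
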
 int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
                  struct file_lock *fl)
 {
         */
        spin_lock(&ops_lock);
        if (info.wait) {
-               list_for_each_entry(iter, &recv_list, list) {
-                       if (iter->info.fsid == info.fsid &&
-                           iter->info.number == info.number &&
-                           iter->info.owner == info.owner &&
-                           iter->info.pid == info.pid &&
-                           iter->info.start == info.start &&
-                           iter->info.end == info.end &&
-                           iter->info.ex == info.ex &&
-                           iter->info.wait) {
-                               op = iter;
-                               break;
-                       }
-               }
+               op = plock_lookup_waiter(&info);
        } else {
                list_for_each_entry(iter, &recv_list, list) {
                        if (!iter->info.wait) {
 
 
        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
-       if (cmd == F_CANCELLK) {
-               /* Hack: */
-               cmd = F_SETLK;
-               fl->fl_type = F_UNLCK;
-       }
        if (unlikely(gfs2_withdrawn(sdp))) {
                if (fl->fl_type == F_UNLCK)
                        locks_lock_file_wait(file, fl);
                return -EIO;
        }
-       if (IS_GETLK(cmd))
+       if (cmd == F_CANCELLK)
+               return dlm_posix_cancel(ls->ls_dlm, ip->i_no_addr, file, fl);
+       else if (IS_GETLK(cmd))
                return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
        else if (fl->fl_type == F_UNLCK)
                return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
 
         *
         * Internally, fs/dlm will pass these to a misc device, which
         * a userspace daemon will read and write to.
-        *
-        * For now, cancel requests (which happen internally only),
-        * are turned into unlocks. Most of this function taken from
-        * gfs2_lock.
         */
 
-       if (cmd == F_CANCELLK) {
-               cmd = F_SETLK;
-               fl->fl_type = F_UNLCK;
-       }
-
-       if (IS_GETLK(cmd))
+       if (cmd == F_CANCELLK)
+               return dlm_posix_cancel(conn->cc_lockspace, ino, file, fl);
+       else if (IS_GETLK(cmd))
                return dlm_posix_get(conn->cc_lockspace, ino, file, fl);
        else if (fl->fl_type == F_UNLCK)
                return dlm_posix_unlock(conn->cc_lockspace, ino, file, fl);
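
The misc-device round trip mentioned in the ocfs2 comment above is handled by dlm_controld in userspace. A hedged sketch of that loop follows; the device path and resolve_plock() are assumptions, while struct dlm_plock_info comes from the uapi header <linux/dlm_plock.h>.

#include <fcntl.h>
#include <unistd.h>
#include <linux/dlm_plock.h>

/* placeholder for the daemon's real cluster-wide plock resolution */
static int resolve_plock(struct dlm_plock_info *info)
{
	return 0;	/* pretend every request succeeds */
}

int plock_device_loop(void)
{
	struct dlm_plock_info info;
	int fd = open("/dev/misc/dlm_plock", O_RDWR);	/* path is an assumption */

	if (fd < 0)
		return -1;

	for (;;) {
		/* each read() delivers one request queued by the kernel */
		if (read(fd, &info, sizeof(info)) != sizeof(info))
			break;

		/* decide the outcome; for a cancel request this is where the
		 * 0 or -ENOENT seen by do_lock_cancel() originates */
		info.rv = resolve_plock(&info);

		/* writing the struct back is the reply that the kernel's
		 * dev_write() matches against recv_list, now via
		 * plock_lookup_waiter() for waiting requests */
		if (write(fd, &info, sizeof(info)) != sizeof(info))
			break;
	}

	close(fd);
	return 0;
}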