static void nlmclnt_unlock_callback(struct rpc_task *);
static void nlmclnt_cancel_callback(struct rpc_task *);
static int nlm_stat_to_errno(u32 stat);
+static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
/*
 * Cookie counter for NLM requests
 */

/*
* Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
*/
-static inline void
-nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
+static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
struct nlm_args *argp = &req->a_args;
struct nlm_lock *lock = &argp->lock;
locks_copy_lock(&lock->fl, fl);
}
+static void nlmclnt_release_lockargs(struct nlm_rqst *req)
+{
+ struct file_lock *fl = &req->a_args.lock.fl;
+
+ if (fl->fl_ops && fl->fl_ops->fl_release_private)
+ fl->fl_ops->fl_release_private(fl);
+}
+
/*
* Initialize arguments for GRANTED call. The nlm_rqst structure
* has been cleared already.
if (lock->oh.len > NLMCLNT_OHSIZE) {
void *data = kmalloc(lock->oh.len, GFP_KERNEL);
- if (!data)
+ if (!data) {
+ nlmclnt_freegrantargs(call);
return 0;
+ }
call->a_args.lock.oh.data = (u8 *) data;
}
void
nlmclnt_freegrantargs(struct nlm_rqst *call)
{
+ struct file_lock *fl = &call->a_args.lock.fl;
/*
* Check whether we allocated memory for the owner.
*/
if (call->a_args.lock.oh.data != (u8 *) call->a_owner) {
kfree(call->a_args.lock.oh.data);
}
+ if (fl->fl_ops && fl->fl_ops->fl_release_private)
+ fl->fl_ops->fl_release_private(fl);
}
/*
}
call->a_host = host;
+ nlmclnt_locks_init_private(fl, host);
+
/* Set up the argument struct */
nlmclnt_setlockargs(call, fl);
else
status = -EINVAL;
- if (status < 0 && (call->a_flags & RPC_TASK_ASYNC))
- kfree(call);
-
out_restore:
	spin_lock_irqsave(&current->sighand->siglock, flags);
current->blocked = oldset;
{
int status;
- if ((status = nlmclnt_call(req, NLMPROC_TEST)) < 0)
+ status = nlmclnt_call(req, NLMPROC_TEST);
+ nlmclnt_release_lockargs(req);
+ if (status < 0)
return status;
status = req->a_res.status;
	}
	if (status == NLM_LCK_DENIED) {
/*
* Report the conflicting lock back to the application.
- * FIXME: Is it OK to report the pid back as well?
*/
locks_copy_lock(fl, &req->a_res.lock.fl);
- /* fl->fl_pid = 0; */
+ fl->fl_pid = 0;
} else {
return nlm_stat_to_errno(req->a_res.status);
}
return 0;
}
-static
-void nlmclnt_insert_lock_callback(struct file_lock *fl)
+static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
- nlm_get_host(fl->fl_u.nfs_fl.host);
+ memcpy(&new->fl_u.nfs_fl, &fl->fl_u.nfs_fl, sizeof(new->fl_u.nfs_fl));
+ nlm_get_host(new->fl_u.nfs_fl.host);
}
-static
-void nlmclnt_remove_lock_callback(struct file_lock *fl)
+
+static void nlmclnt_locks_release_private(struct file_lock *fl)
{
- if (fl->fl_u.nfs_fl.host) {
- nlm_release_host(fl->fl_u.nfs_fl.host);
- fl->fl_u.nfs_fl.host = NULL;
- }
+ nlm_release_host(fl->fl_u.nfs_fl.host);
+ fl->fl_ops = NULL;
+}
+
+static struct file_lock_operations nlmclnt_lock_ops = {
+ .fl_copy_lock = nlmclnt_locks_copy_lock,
+ .fl_release_private = nlmclnt_locks_release_private,
+};
+
+static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
+{
+ BUG_ON(fl->fl_ops != NULL);
+ fl->fl_u.nfs_fl.state = 0;
+ fl->fl_u.nfs_fl.flags = 0;
+ fl->fl_u.nfs_fl.host = nlm_get_host(host);
+ fl->fl_ops = &nlmclnt_lock_ops;
}
/*
if (!host->h_monitored && nsm_monitor(host) < 0) {
printk(KERN_NOTICE "lockd: failed to monitor %s\n",
host->h_name);
- return -ENOLCK;
+ status = -ENOLCK;
+ goto out;
}
do {
status = nlmclnt_block(host, fl, &resp->status);
}
if (status < 0)
- return status;
+ goto out;
} while (resp->status == NLM_LCK_BLOCKED && req->a_args.block);
if (resp->status == NLM_LCK_GRANTED) {
fl->fl_u.nfs_fl.state = host->h_state;
fl->fl_u.nfs_fl.flags |= NFS_LCK_GRANTED;
- fl->fl_u.nfs_fl.host = host;
- fl->fl_insert = nlmclnt_insert_lock_callback;
- fl->fl_remove = nlmclnt_remove_lock_callback;
}
-
- return nlm_stat_to_errno(resp->status);
+ status = nlm_stat_to_errno(resp->status);
+out:
+ nlmclnt_release_lockargs(req);
+ return status;
}
/*
fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED;
if (req->a_flags & RPC_TASK_ASYNC) {
- return nlmclnt_async_call(req, NLMPROC_UNLOCK,
+ status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
nlmclnt_unlock_callback);
+ if (status < 0) {
+ nlmclnt_release_lockargs(req);
+ kfree(req);
+ }
+ return status;
}
- if ((status = nlmclnt_call(req, NLMPROC_UNLOCK)) < 0)
+ status = nlmclnt_call(req, NLMPROC_UNLOCK);
+ nlmclnt_release_lockargs(req);
+ if (status < 0)
return status;
if (resp->status == NLM_LCK_GRANTED)
die:
nlm_release_host(req->a_host);
+ nlmclnt_release_lockargs(req);
kfree(req);
return;
retry_rebind:
status = nlmclnt_async_call(req, NLMPROC_CANCEL,
nlmclnt_cancel_callback);
- if (status < 0)
+ if (status < 0) {
+ nlmclnt_release_lockargs(req);
kfree(req);
+ }
	spin_lock_irqsave(&current->sighand->siglock, flags);
current->blocked = oldset;
die:
nlm_release_host(req->a_host);
+ nlmclnt_release_lockargs(req);
kfree(req);
return;