/* * Copyright (c) 2001 The Regents of the University of Michigan. * All rights reserved. * * Kendrick Smith <kmsmith@umich.edu> * Andy Adamson <andros@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* * Encode/decode NFSv4 CB basic data types * * Basic NFSv4 callback data types are defined in section 15 of RFC * 3530: "Network File System (NFS) version 4 Protocol" and section * 20 of RFC 5661: "Network File System (NFS) Version 4 Minor Version * 1 Protocol"
*/
/* * If we cannot translate the error, the recovery routines should * handle it. * * Note: remaining NFSv4 error codes have values > 10000, so should * not conflict with native Linux error codes.
*/ staticint nfs_cb_stat_to_errno(int status)
{ int i;
for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) { if (nfs_cb_errtbl[i].stat == status) return nfs_cb_errtbl[i].errno;
}
/*
 * update_cb_slot_table - track the client's target_highest_slotid
 * @ses:    backchannel session
 * @target: csr_target_highest_slotid from a CB_SEQUENCE reply
 *
 * Grow or shrink the range of usable backchannel slots.  Slots that
 * become newly visible get their sequence numbers reset to 1.
 */
static void update_cb_slot_table(struct nfsd4_session *ses, u32 target)
{
	/* No need to do anything if nothing changed */
	if (likely(target == READ_ONCE(ses->se_cb_highest_slot)))
		return;

	spin_lock(&ses->se_lock);
	if (target > ses->se_cb_highest_slot) {
		int i;

		/*
		 * Growing the slot table. Reset any new sequences to 1.
		 *
		 * NB: There is some debate about whether the RFC requires this,
		 * but the Linux client expects it.
		 */
		for (i = ses->se_cb_highest_slot + 1; i <= target; ++i)
			ses->se_cb_seq_nr[i] = 1;
	}
	ses->se_cb_highest_slot = target;
	spin_unlock(&ses->se_lock);
}
/* * CB_SEQUENCE4resok * * struct CB_SEQUENCE4resok { * sessionid4 csr_sessionid; * sequenceid4 csr_sequenceid; * slotid4 csr_slotid; * slotid4 csr_highest_slotid; * slotid4 csr_target_highest_slotid; * }; * * union CB_SEQUENCE4res switch (nfsstat4 csr_status) { * case NFS4_OK: * CB_SEQUENCE4resok csr_resok4; * default: * void; * }; * * Our current back channel implmentation supports a single backchannel * with a single slot.
*/ staticint decode_cb_sequence4resok(struct xdr_stream *xdr, struct nfsd4_callback *cb)
{ struct nfsd4_session *session = cb->cb_clp->cl_cb_session; int status = -ESERVERFAULT;
__be32 *p;
u32 seqid, slotid, target;
/* * If the server returns different values for sessionID, slotID or * sequence number, the server is looney tunes.
*/
p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4); if (unlikely(p == NULL)) goto out_overflow;
if (memcmp(p, session->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
dprintk("NFS: %s Invalid session id\n", __func__); goto out;
}
p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
/*
 * Decode a CB_SEQUENCE result.
 *
 * NFSv4.0 callbacks carry no CB_SEQUENCE operation, so there is
 * nothing to decode for minor version 0.
 *
 * Returns zero on success, with cb->cb_seq_status carrying the
 * CB_SEQUENCE operation status; otherwise a negative errno is
 * returned for an XDR-level failure.
 */
static int decode_cb_sequence4res(struct xdr_stream *xdr,
				  struct nfsd4_callback *cb)
{
	int status;

	if (cb->cb_clp->cl_minorversion == 0)
		return 0;

	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_seq_status);
	if (unlikely(status || cb->cb_seq_status))
		return status;

	return decode_cb_sequence4resok(xdr, cb);
}
/* * NFSv4.0 and NFSv4.1 XDR encode functions * * NFSv4.0 callback argument types are defined in section 15 of RFC * 3530: "Network File System (NFS) version 4 Protocol" and section 20 * of RFC 5661: "Network File System (NFS) Version 4 Minor Version 1 * Protocol".
*/
/*
 * Encode a CB_NULL request.
 *
 * NB: Without this zero space reservation, callbacks over krb5p fail
 */
static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
				 const void *__unused)
{
	/* Touch the stream so its buffer is set up; CB_NULL encodes nothing. */
	xdr_reserve_space(xdr, 0);
}
/* * NFSv4.0 and NFSv4.1 XDR decode functions * * NFSv4.0 callback result types are defined in section 15 of RFC * 3530: "Network File System (NFS) version 4 Protocol" and section 20 * of RFC 5661: "Network File System (NFS) Version 4 Minor Version 1 * Protocol".
*/
p = xdr_reserve_space(xdr, 4);
*p = cbo->co_nfserr; switch (cbo->co_nfserr) { case nfs_ok:
p = xdr_reserve_space(xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
p = xdr_encode_empty_array(p);
p = xdr_encode_hyper(p, cbo->co_res.wr_bytes_written);
*p++ = cpu_to_be32(cbo->co_res.wr_stable_how);
p = xdr_encode_opaque_fixed(p, cbo->co_res.wr_verifier.data,
NFS4_VERIFIER_SIZE); break; default:
p = xdr_reserve_space(xdr, 8); /* We always return success if bytes were written */
p = xdr_encode_hyper(p, 0);
}
}
staticunsignedint nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)]; staticconststruct rpc_version nfs_cb_version4 = { /* * Note on the callback rpc program version number: despite language in rfc * 5661 section 18.36.3 requiring servers to use 4 in this field, the * official xdr descriptions for both 4.0 and 4.1 specify version 1, and * in practice that appears to be what implementations use. The section * 18.36.3 language is expected to be fixed in an erratum.
*/
.number = 1,
.nrprocs = ARRAY_SIZE(nfs4_cb_procedures),
.procs = nfs4_cb_procedures,
.counts = nfs4_cb_counts,
};
/* * nfsd4_lease is set to at most one hour in __nfsd4_write_time, * so we can use 32-bit math on it. Warn if that assumption * ever stops being true.
*/ if (WARN_ON_ONCE(nn->nfsd4_lease > 3600)) return 360 * HZ;
staticconststruct rpc_call_ops nfsd4_cb_probe_ops = { /* XXX: release method to ensure we set the cb channel down if
* necessary on early failure? */
.rpc_call_done = nfsd4_cb_probe_done,
.rpc_release = nfsd4_cb_probe_release,
};
/*
 * Poke the callback thread to process any updates to the callback
 * parameters, and send a null probe.
 */
void nfsd4_probe_callback(struct nfs4_client *clp)
{
	trace_nfsd_cb_probe(clp);
	/* Channel state is unknown until the probe completes. */
	nfsd4_mark_cb_state(clp, NFSD4_CB_UNKNOWN);
	/* Ask the callback worker to (re)build the rpc client. */
	set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
	nfsd4_run_cb(&clp->cl_cb_null);
}
/*
 * grab_slot - reserve a free backchannel slot
 * @ses: session whose slot-availability bitmap to search
 *
 * Returns the index of a slot that is now marked busy, or -1 if no
 * slot at or below se_cb_highest_slot is currently available.
 */
static int grab_slot(struct nfsd4_session *ses)
{
	int idx;

	spin_lock(&ses->se_lock);
	/* ffs() is 1-based; zero means no bits set. */
	idx = ffs(ses->se_cb_slot_avail) - 1;
	if (idx < 0 || idx > ses->se_cb_highest_slot) {
		spin_unlock(&ses->se_lock);
		return -1;
	}
	/* clear the bit for the slot */
	ses->se_cb_slot_avail &= ~BIT(idx);
	spin_unlock(&ses->se_lock);
	return idx;
}
/* * There's currently a single callback channel slot. * If the slot is available, then mark it busy. Otherwise, set the * thread for sleeping on the callback RPC wait queue.
*/ staticbool nfsd41_cb_get_slot(struct nfsd4_callback *cb, struct rpc_task *task)
{ struct nfs4_client *clp = cb->cb_clp; struct nfsd4_session *ses = clp->cl_cb_session;
if (cb->cb_ops && cb->cb_ops->release)
cb->cb_ops->release(cb);
nfsd41_cb_inflight_end(clp);
}
/**
 * nfsd41_cb_referring_call - add a referring call to a callback operation
 * @cb: context of callback to add the rc to
 * @sessionid: referring call's session ID
 * @slotid: referring call's session slot index
 * @seqno: referring call's slot sequence number
 *
 * Caller serializes access to @cb.
 *
 * NB: If memory allocation fails, the referring call is not added.
 */
void nfsd41_cb_referring_call(struct nfsd4_callback *cb,
			      struct nfs4_sessionid *sessionid,
			      u32 slotid, u32 seqno)
{
	struct nfsd4_referring_call_list *rcl;
	struct nfsd4_referring_call *rc;
	bool found;

	might_sleep();

	/* Find the per-session list matching @sessionid, or create one. */
	found = false;
	list_for_each_entry(rcl, &cb->cb_referring_call_list, __list) {
		if (!memcmp(rcl->rcl_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			found = true;
			break;
		}
	}
	if (!found) {
		rcl = kmalloc(sizeof(*rcl), GFP_KERNEL);
		if (!rcl)
			return;
		memcpy(rcl->rcl_sessionid.data, sessionid->data,
		       NFS4_MAX_SESSIONID_LEN);
		rcl->__nr_referring_calls = 0;
		INIT_LIST_HEAD(&rcl->rcl_referring_calls);
		list_add(&rcl->__list, &cb->cb_referring_call_list);
		cb->cb_nr_referring_call_list++;
	}

	/* Record the (slotid, seqno) pair unless it is already present. */
	found = false;
	list_for_each_entry(rc, &rcl->rcl_referring_calls, __list) {
		if (rc->rc_sequenceid == seqno && rc->rc_slotid == slotid) {
			found = true;
			break;
		}
	}
	if (!found) {
		rc = kmalloc(sizeof(*rc), GFP_KERNEL);
		if (!rc)
			goto out;
		rc->rc_sequenceid = seqno;
		rc->rc_slotid = slotid;
		rcl->__nr_referring_calls++;
		list_add(&rc->__list, &rcl->rcl_referring_calls);
	}

out:
	/* Don't leave behind a session entry that holds no referring calls. */
	if (!rcl->__nr_referring_calls) {
		cb->cb_nr_referring_call_list--;
		list_del(&rcl->__list);
		kfree(rcl);
	}
}
/** * nfsd41_cb_destroy_referring_call_list - release referring call info * @cb: context of a callback that has completed * * Callers who allocate referring calls using nfsd41_cb_referring_call() must * release those resources by calling nfsd41_cb_destroy_referring_call_list. * * Caller serializes access to @cb.
*/ void nfsd41_cb_destroy_referring_call_list(struct nfsd4_callback *cb)
{ struct nfsd4_referring_call_list *rcl; struct nfsd4_referring_call *rc;
while (!list_empty(&cb->cb_referring_call_list)) {
rcl = list_first_entry(&cb->cb_referring_call_list, struct nfsd4_referring_call_list,
__list);
/* * cb_seq_status is only set in decode_cb_sequence4res, * and so will remain 1 if an rpc level failure occurs.
*/
trace_nfsd_cb_rpc_prepare(clp);
cb->cb_seq_status = 1;
cb->cb_status = 0; if (minorversion && !nfsd41_cb_get_slot(cb, task)) return;
rpc_call_start(task);
}
/* Returns true if CB_COMPOUND processing should continue */ staticbool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
{ struct nfsd4_session *session = cb->cb_clp->cl_cb_session; bool ret = false;
if (cb->cb_held_slot < 0) goto requeue;
/* This is the operation status code for CB_SEQUENCE */
trace_nfsd_cb_seq_status(task, cb); switch (cb->cb_seq_status) { case 0: /* * No need for lock, access serialized in nfsd4_cb_prepare * * RFC5661 20.9.3 * If CB_SEQUENCE returns an error, then the state of the slot * (sequence ID, cached reply) MUST NOT change.
*/
++session->se_cb_seq_nr[cb->cb_held_slot];
ret = true; break; case -ESERVERFAULT: /* * Call succeeded, but the session, slot index, or slot * sequence number in the response do not match the same * in the server's call. The sequence information is thus * untrustworthy.
*/
nfsd4_mark_cb_fault(cb->cb_clp); break; case 1: /* * cb_seq_status remains 1 if an RPC Reply was never * received. NFSD can't know if the client processed * the CB_SEQUENCE operation. Ask the client to send a * DESTROY_SESSION to recover.
*/
fallthrough; case -NFS4ERR_BADSESSION:
nfsd4_mark_cb_fault(cb->cb_clp); goto requeue; case -NFS4ERR_DELAY:
cb->cb_seq_status = 1; if (RPC_SIGNALLED(task) || !rpc_restart_call(task)) goto requeue;
rpc_delay(task, 2 * HZ); returnfalse; case -NFS4ERR_SEQ_MISORDERED: case -NFS4ERR_BADSLOT: /* * A SEQ_MISORDERED or BADSLOT error means that the client and * server are out of sync as to the backchannel parameters. Mark * the backchannel faulty and restart the RPC, but leak the slot * so that it's no longer used.
*/
nfsd4_mark_cb_fault(cb->cb_clp);
cb->cb_held_slot = -1; goto retry_nowait; default:
nfsd4_mark_cb_fault(cb->cb_clp);
}
trace_nfsd_cb_free_slot(task, cb);
nfsd41_cb_release_slot(cb); return ret;
retry_nowait: /* * RPC_SIGNALLED() means that the rpc_client is being torn down and * (possibly) recreated. Requeue the call in that case.
*/ if (!RPC_SIGNALLED(task)) { if (rpc_restart_call_prepare(task)) returnfalse;
}
requeue:
nfsd41_cb_release_slot(cb);
nfsd4_requeue_cb(task, cb); returnfalse;
}
if (!clp->cl_minorversion) { /* * If the backchannel connection was shut down while this * task was queued, we need to resubmit it after setting up * a new backchannel connection. * * Note that if we lost our callback connection permanently * the submission code will error out, so we don't need to * handle that case here.
*/ if (RPC_SIGNALLED(task))
nfsd4_requeue_cb(task, cb);
} elseif (!nfsd4_cb_sequence_done(task, cb)) { return;
}
/* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
	if (clp->cl_cb_state != NFSD4_CB_UNKNOWN)
		trace_nfsd_cb_shutdown(clp);
	set_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags);
	/*
	 * Note this won't actually result in a null callback;
	 * instead, nfsd4_run_cb_null() will detect the killed
	 * client, destroy the rpc client, and stop:
	 */
	nfsd4_run_cb(&clp->cl_cb_null);
	/* Drain queued callback work, then wait for in-flight callbacks. */
	flush_workqueue(clp->cl_callback_wq);
	nfsd41_cb_inflight_wait_complete(clp);
}
/* * Note there isn't a lot of locking in this code; instead we depend on * the fact that it is run from clp->cl_callback_wq, which won't run two * work items at once. So, for example, clp->cl_callback_wq handles all * access of cl_cb_client and all calls to rpc_create or rpc_shutdown_client.
*/ staticvoid nfsd4_process_cb_update(struct nfsd4_callback *cb)
{ struct nfs4_cb_conn conn; struct nfs4_client *clp = cb->cb_clp; struct nfsd4_session *ses = NULL; struct nfsd4_conn *c; int err;
trace_nfsd_cb_bc_update(clp, cb);
/* * This is either an update, or the client dying; in either case, * kill the old client:
*/ if (clp->cl_cb_client) {
trace_nfsd_cb_bc_shutdown(clp, cb);
rpc_shutdown_client(clp->cl_cb_client);
clp->cl_cb_client = NULL;
put_cred(clp->cl_cb_cred);
clp->cl_cb_cred = NULL;
} if (clp->cl_cb_conn.cb_xprt) {
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
clp->cl_cb_conn.cb_xprt = NULL;
} if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags)) return;
spin_lock(&clp->cl_lock); /* * Only serialized callback code is allowed to clear these * flags; main nfsd code can only set them:
*/
WARN_ON(!(clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK));
clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
c = __nfsd4_find_backchannel(clp); if (c) {
svc_xprt_get(c->cn_xprt);
conn.cb_xprt = c->cn_xprt;
ses = c->cn_session;
}
spin_unlock(&clp->cl_lock);
err = setup_callback_client(clp, &conn, ses); if (err) {
nfsd4_mark_cb_down(clp); if (c)
svc_xprt_put(c->cn_xprt); return;
}
}
/**
 * nfsd4_run_cb - queue up a callback job to run
 * @cb: callback to queue
 *
 * Kick off a callback to do its thing. Returns false if it was already
 * on a queue, true otherwise.
 */
bool nfsd4_run_cb(struct nfsd4_callback *cb)
{
	struct nfs4_client *clp = cb->cb_clp;
	bool queued;

	nfsd41_cb_inflight_begin(clp);
	queued = nfsd4_queue_cb(cb);
	/* Drop the in-flight reference if the work was already queued. */
	if (!queued)
		nfsd41_cb_inflight_end(clp);
	return queued;
}
/*
 * NOTE(review): the following disclaimer is website boilerplate that was
 * appended to this listing during extraction (translated from German);
 * wrapped in a comment so the file remains valid C:
 *
 * The information on this web page has been compiled carefully and to the
 * best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the information provided is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental.
 */