staging: lustre: Fix typo in lustre/ptlrpc/gss
This patch fixes spelling typos in comments within lustre/ptlrpc/gss.

Signed-off-by: Masanari Iida <standby24x7@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 08c4c90dc2
commit fb4b81fea5

5 changed files with 7 additions and 7 deletions
@@ -336,7 +336,7 @@ int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
 if (rc) {
 /* If any _real_ denial be made, we expect server return
 * -EACCES reply or return success but indicate gss error
-* inside reply messsage. All other errors are treated as
+* inside reply message. All other errors are treated as
 * timeout, caller might try the negotiation repeatedly,
 * leave recovery decisions to general ptlrpc layer.
 *
@@ -1176,7 +1176,7 @@ int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)

 /*
 * called with key semaphore write locked. it means we can operate
-* on the context without fear of loosing refcount.
+* on the context without fear of losing refcount.
 */
 static
 int gss_kt_update(struct key *key, const void *data, size_t datalen)
@@ -85,7 +85,7 @@ static void gss_sec_pipe_upcall_fini(struct gss_sec *gsec)
 }

 /****************************************
-* internel context helpers *
+* internal context helpers *
 ****************************************/

 static
@@ -652,7 +652,7 @@ __u32 mech_name2idx(const char *name)

 /* pipefs dentries for each mechanisms */
 static struct dentry *de_pipes[MECH_MAX] = { NULL, };
-/* all upcall messgaes linked here */
+/* all upcall messages linked here */
 static struct list_head upcall_lists[MECH_MAX];
 /* and protected by this */
 static spinlock_t upcall_locks[MECH_MAX];
@@ -586,7 +586,7 @@ static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
 goto out;

 /* currently the expiry time passed down from user-space
-* is invalid, here we retrive it from mech. */
+* is invalid, here we retrieve it from mech. */
 if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
 CERROR("unable to get expire time, drop it\n");
 goto out;
@@ -1067,7 +1067,7 @@ int __init gss_init_svc_upcall(void)
 * the init upcall channel, otherwise there's big chance that the first
 * upcall issued before the channel be opened thus nfsv4 cache code will
 * drop the request direclty, thus lead to unnecessary recovery time.
-* here we wait at miximum 1.5 seconds. */
+* here we wait at maximum 1.5 seconds. */
 for (i = 0; i < 6; i++) {
 if (atomic_read(&rsi_cache.readers) > 0)
 break;
@@ -1215,7 +1215,7 @@ int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
 /*
 * remove UPTODATE flag of reverse ctx thus we won't send fini rpc,
 * this is to avoid potential problems of client side reverse svc ctx
-* be mis-destroyed in various recovery senarios. anyway client can
+* be mis-destroyed in various recovery scenarios. anyway client can
 * manage its reverse ctx well by associating it with its buddy ctx.
 */
 if (sec_is_reverse(sec))