Diffstat (limited to 'fs')
-rw-r--r--  fs/nfs/callback.h       |   8
-rw-r--r--  fs/nfs/callback_proc.c  | 165
-rw-r--r--  fs/nfs/callback_xdr.c   | 105
-rw-r--r--  fs/nfs/client.c         |  48
-rw-r--r--  fs/nfs/dns_resolve.c    |  18
-rw-r--r--  fs/nfs/file.c           |  30
-rw-r--r--  fs/nfs/inode.c          |   7
-rw-r--r--  fs/nfs/nfs3proc.c       |   9
-rw-r--r--  fs/nfs/nfs4_fs.h        |   2
-rw-r--r--  fs/nfs/nfs4proc.c       | 103
-rw-r--r--  fs/nfs/nfs4renewd.c     |  24
-rw-r--r--  fs/nfs/nfs4state.c      | 118
-rw-r--r--  fs/nfs/nfs4xdr.c        |  10
-rw-r--r--  fs/nfs/proc.c           |  41
14 files changed, 510 insertions(+), 178 deletions(-)
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index d4036be0b589..85a7cfd1b8dd 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -119,6 +119,14 @@ struct cb_recallanyargs {
 };
 
 extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy);
+
+struct cb_recallslotargs {
+	struct sockaddr	*crsa_addr;
+	uint32_t	crsa_target_max_slots;
+};
+extern unsigned nfs4_callback_recallslot(struct cb_recallslotargs *args,
+					  void *dummy);
+
 #endif /* CONFIG_NFS_V4_1 */
 
 extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index defa9b4c470e..84761b5bb8e2 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -143,44 +143,49 @@ int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const n
  * Return success if the sequenceID is one more than what we last saw on
  * this slot, accounting for wraparound.  Increments the slot's sequence.
  *
- * We don't yet implement a duplicate request cache, so at this time
- * we will log replays, and process them as if we had not seen them before,
- * but we don't bump the sequence in the slot.  Not too worried about it,
+ * We don't yet implement a duplicate request cache, instead we set the
+ * back channel ca_maxresponsesize_cached to zero. This is OK for now
  * since we only currently implement idempotent callbacks anyway.
  *
  * We have a single slot backchannel at this time, so we don't bother
  * checking the used_slots bit array on the table.  The lower layer guarantees
  * a single outstanding callback request at a time.
  */
-static int
-validate_seqid(struct nfs4_slot_table *tbl, u32 slotid, u32 seqid)
+static __be32
+validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
 {
 	struct nfs4_slot *slot;
 
 	dprintk("%s enter. slotid %d seqid %d\n",
-		__func__, slotid, seqid);
+		__func__, args->csa_slotid, args->csa_sequenceid);
 
-	if (slotid > NFS41_BC_MAX_CALLBACKS)
+	if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
 		return htonl(NFS4ERR_BADSLOT);
 
-	slot = tbl->slots + slotid;
+	slot = tbl->slots + args->csa_slotid;
 	dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr);
 
 	/* Normal */
-	if (likely(seqid == slot->seq_nr + 1)) {
+	if (likely(args->csa_sequenceid == slot->seq_nr + 1)) {
 		slot->seq_nr++;
 		return htonl(NFS4_OK);
 	}
 
 	/* Replay */
-	if (seqid == slot->seq_nr) {
-		dprintk("%s seqid %d is a replay - no DRC available\n",
-			__func__, seqid);
-		return htonl(NFS4_OK);
+	if (args->csa_sequenceid == slot->seq_nr) {
+		dprintk("%s seqid %d is a replay\n",
+			__func__, args->csa_sequenceid);
+		/* Signal process_op to set this error on next op */
+		if (args->csa_cachethis == 0)
+			return htonl(NFS4ERR_RETRY_UNCACHED_REP);
+
+		/* The ca_maxresponsesize_cached is 0 with no DRC */
+		else if (args->csa_cachethis == 1)
+			return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
 	}
 
 	/* Wraparound */
-	if (seqid == 1 && (slot->seq_nr + 1) == 0) {
+	if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) {
 		slot->seq_nr = 1;
 		return htonl(NFS4_OK);
 	}
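An aside on the sequencing rules above (a minimal standalone sketch, not the kernel code; the types and names are simplified): the slot accepts seq+1 as new work, seq as a replay, and 1 as the wraparound successor of 0xffffffff; anything else is a misordered request.

#include <stdint.h>

enum seq_result { SEQ_OK, SEQ_REPLAY, SEQ_BAD };

static enum seq_result check_seqid(uint32_t *slot_seq, uint32_t seqid)
{
	if (seqid == *slot_seq + 1) {		/* normal: next in sequence */
		(*slot_seq)++;
		return SEQ_OK;
	}
	if (seqid == *slot_seq)			/* retransmission of the last call */
		return SEQ_REPLAY;
	if (seqid == 1 && *slot_seq + 1 == 0) {	/* 0xffffffff wraps to 1 */
		*slot_seq = 1;
		return SEQ_OK;
	}
	return SEQ_BAD;
}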
@@ -225,27 +230,87 @@ validate_seqid(struct nfs4_slot_table *tbl, u32 slotid, u32 seqid)
 	return NULL;
 }
 
-/* FIXME: referring calls should be processed */
-unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
+/*
+ * For each referring call triple, check the session's slot table for
+ * a match.  If the slot is in use and the sequence numbers match, the
+ * client is still waiting for a response to the original request.
+ */
+static bool referring_call_exists(struct nfs_client *clp,
+				  uint32_t nrclists,
+				  struct referring_call_list *rclists)
+{
+	bool status = 0;
+	int i, j;
+	struct nfs4_session *session;
+	struct nfs4_slot_table *tbl;
+	struct referring_call_list *rclist;
+	struct referring_call *ref;
+
+	/*
+	 * XXX When client trunking is implemented, this becomes
+	 * a session lookup from within the loop
+	 */
+	session = clp->cl_session;
+	tbl = &session->fc_slot_table;
+
+	for (i = 0; i < nrclists; i++) {
+		rclist = &rclists[i];
+		if (memcmp(session->sess_id.data,
+			   rclist->rcl_sessionid.data,
+			   NFS4_MAX_SESSIONID_LEN) != 0)
+			continue;
+
+		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
+			ref = &rclist->rcl_refcalls[j];
+
+			dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u "
+				"slotid %u\n", __func__,
+				((u32 *)&rclist->rcl_sessionid.data)[0],
+				((u32 *)&rclist->rcl_sessionid.data)[1],
+				((u32 *)&rclist->rcl_sessionid.data)[2],
+				((u32 *)&rclist->rcl_sessionid.data)[3],
+				ref->rc_sequenceid, ref->rc_slotid);
+
+			spin_lock(&tbl->slot_tbl_lock);
+			status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
+				  tbl->slots[ref->rc_slotid].seq_nr ==
+					ref->rc_sequenceid);
+			spin_unlock(&tbl->slot_tbl_lock);
+			if (status)
+				goto out;
+		}
+	}
+
+out:
+	return status;
+}
+
+__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
 				struct cb_sequenceres *res)
 {
 	struct nfs_client *clp;
-	int i, status;
-
-	for (i = 0; i < args->csa_nrclists; i++)
-		kfree(args->csa_rclists[i].rcl_refcalls);
-	kfree(args->csa_rclists);
+	int i;
+	__be32 status;
 
 	status = htonl(NFS4ERR_BADSESSION);
 	clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid);
 	if (clp == NULL)
 		goto out;
 
-	status = validate_seqid(&clp->cl_session->bc_slot_table,
-				args->csa_slotid, args->csa_sequenceid);
+	status = validate_seqid(&clp->cl_session->bc_slot_table, args);
 	if (status)
 		goto out_putclient;
 
+	/*
+	 * Check for pending referring calls.  If a match is found, a
+	 * related callback was received before the response to the original
+	 * call.
+	 */
+	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
+		status = htonl(NFS4ERR_DELAY);
+		goto out_putclient;
+	}
+
 	memcpy(&res->csr_sessionid, &args->csa_sessionid,
 	       sizeof(res->csr_sessionid));
 	res->csr_sequenceid = args->csa_sequenceid;
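For context (an illustrative sketch with simplified types, not the kernel code): a referring call is a (sessionid, slotid, sequenceid) triple naming the server call that triggered this callback. If that slot is still marked in use at that sequence number, the original reply has not been processed yet, so the callback answers NFS4ERR_DELAY and the server retries later.

#include <stdbool.h>
#include <stdint.h>

struct slot_tbl { const uint32_t *seq_nr; const bool *in_use; };

/* Is the referred-to server call still outstanding on this session? */
static bool referring_call_pending(const struct slot_tbl *tbl,
				   uint32_t slotid, uint32_t seqid)
{
	return tbl->in_use[slotid] && tbl->seq_nr[slotid] == seqid;
}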
@@ -256,15 +321,23 @@ unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
 
 out_putclient:
 	nfs_put_client(clp);
 out:
-	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
-	res->csr_status = status;
-	return res->csr_status;
+	for (i = 0; i < args->csa_nrclists; i++)
+		kfree(args->csa_rclists[i].rcl_refcalls);
+	kfree(args->csa_rclists);
+
+	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP))
+		res->csr_status = 0;
+	else
+		res->csr_status = status;
+	dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
+		ntohl(status), ntohl(res->csr_status));
+	return status;
 }
 
-unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy)
+__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy)
 {
 	struct nfs_client *clp;
-	int status;
+	__be32 status;
 	fmode_t flags = 0;
 
 	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
@@ -289,4 +362,40 @@ out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
 	return status;
 }
+
+/* Reduce the fore channel's max_slots to the target value */
+__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy)
+{
+	struct nfs_client *clp;
+	struct nfs4_slot_table *fc_tbl;
+	__be32 status;
+
+	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+	clp = nfs_find_client(args->crsa_addr, 4);
+	if (clp == NULL)
+		goto out;
+
+	dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n",
+		rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
+		args->crsa_target_max_slots);
+
+	fc_tbl = &clp->cl_session->fc_slot_table;
+
+	status = htonl(NFS4ERR_BAD_HIGH_SLOT);
+	if (args->crsa_target_max_slots > fc_tbl->max_slots ||
+	    args->crsa_target_max_slots < 1)
+		goto out_putclient;
+
+	status = htonl(NFS4_OK);
+	if (args->crsa_target_max_slots == fc_tbl->max_slots)
+		goto out_putclient;
+
+	fc_tbl->target_max_slots = args->crsa_target_max_slots;
+	nfs41_handle_recall_slot(clp);
+out_putclient:
+	nfs_put_client(clp);	/* balance nfs_find_client */
+out:
+	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+	return status;
+}
 #endif /* CONFIG_NFS_V4_1 */
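A note on the replay plumbing (illustrative sketch): on a replay with no cached reply available, validate_seqid() returns NFS4ERR_RETRY_UNCACHED_REP, but the CB_SEQUENCE op itself must still report success in csr_status; the error is latched so that the ops after it in the compound fail with it. Roughly:

#include <stdint.h>

#define EX_NFS4_OK			0
#define EX_NFS4ERR_RETRY_UNCACHED_REP	10068	/* RFC 5661 value */

/* Split the replay error: OK for the sequence op, latched for later ops. */
static void split_replay_status(uint32_t seq_status,
				uint32_t *csr_status, uint32_t *drc_status)
{
	if (seq_status == EX_NFS4ERR_RETRY_UNCACHED_REP) {
		*csr_status = EX_NFS4_OK;	/* sequence op itself succeeds */
		*drc_status = seq_status;	/* later ops inherit the error */
	} else {
		*csr_status = seq_status;
		*drc_status = 0;
	}
}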
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 8e1a2511c8be..db30c0b398b5 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -24,10 +24,14 @@
 #define CB_OP_SEQUENCE_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ + \
 					4 + 1 + 3)
 #define CB_OP_RECALLANY_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
+#define CB_OP_RECALLSLOT_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
 #endif /* CONFIG_NFS_V4_1 */
 
 #define NFSDBG_FACILITY NFSDBG_CALLBACK
 
+/* Internal error code */
+#define NFS4ERR_RESOURCE_HDR	11050
+
 typedef __be32 (*callback_process_op_t)(void *, void *);
 typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *);
 typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *);
@@ -173,7 +177,7 @@ static __be32 decode_op_hdr(struct xdr_stream *xdr, unsigned int *op)
 	__be32 *p;
 	p = read_buf(xdr, 4);
 	if (unlikely(p == NULL))
-		return htonl(NFS4ERR_RESOURCE);
+		return htonl(NFS4ERR_RESOURCE_HDR);
 	*op = ntohl(*p);
 	return 0;
 }
@@ -215,10 +219,10 @@ out:
 
 #if defined(CONFIG_NFS_V4_1)
 
-static unsigned decode_sessionid(struct xdr_stream *xdr,
+static __be32 decode_sessionid(struct xdr_stream *xdr,
 				 struct nfs4_sessionid *sid)
 {
-	uint32_t *p;
+	__be32 *p;
 	int len = NFS4_MAX_SESSIONID_LEN;
 
 	p = read_buf(xdr, len);
@@ -229,12 +233,12 @@ static unsigned decode_sessionid(struct xdr_stream *xdr,
 	return 0;
 }
 
-static unsigned decode_rc_list(struct xdr_stream *xdr,
+static __be32 decode_rc_list(struct xdr_stream *xdr,
 			       struct referring_call_list *rc_list)
 {
-	uint32_t *p;
+	__be32 *p;
 	int i;
-	unsigned status;
+	__be32 status;
 
 	status = decode_sessionid(xdr, &rc_list->rcl_sessionid);
 	if (status)
@@ -267,13 +271,13 @@ out:
 	return status;
 }
 
-static unsigned decode_cb_sequence_args(struct svc_rqst *rqstp,
+static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
 					struct xdr_stream *xdr,
 					struct cb_sequenceargs *args)
 {
-	uint32_t *p;
+	__be32 *p;
 	int i;
-	unsigned status;
+	__be32 status;
 
 	status = decode_sessionid(xdr, &args->csa_sessionid);
 	if (status)
@@ -327,11 +331,11 @@ out_free:
 	goto out;
 }
 
-static unsigned decode_recallany_args(struct svc_rqst *rqstp,
+static __be32 decode_recallany_args(struct svc_rqst *rqstp,
 				      struct xdr_stream *xdr,
 				      struct cb_recallanyargs *args)
 {
-	uint32_t *p;
+	__be32 *p;
 
 	args->craa_addr = svc_addr(rqstp);
 	p = read_buf(xdr, 4);
@@ -346,6 +350,20 @@ static unsigned decode_recallany_args(struct svc_rqst *rqstp,
 	return 0;
 }
 
+static __be32 decode_recallslot_args(struct svc_rqst *rqstp,
+					struct xdr_stream *xdr,
+					struct cb_recallslotargs *args)
+{
+	__be32 *p;
+
+	args->crsa_addr = svc_addr(rqstp);
+	p = read_buf(xdr, 4);
+	if (unlikely(p == NULL))
+		return htonl(NFS4ERR_BADXDR);
+	args->crsa_target_max_slots = ntohl(*p++);
+	return 0;
+}
+
 #endif /* CONFIG_NFS_V4_1 */
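The churn from unsigned/uint32_t to __be32 in this file is a type-safety change, not a behavior change: __be32 marks a value as wire-order (big-endian), so sparse can flag any accidental mix with host-order integers, and the conversion points stay explicit. A userspace-flavored sketch of the discipline (be32 stands in for the kernel's sparse-checked type):

#include <stdint.h>
#include <arpa/inet.h>

typedef uint32_t be32;	/* stand-in for the kernel's __be32 */

static be32 status_to_wire(uint32_t host_status)
{
	return htonl(host_status);	/* host order -> wire order */
}

static uint32_t word_from_wire(be32 wire_word)
{
	return ntohl(wire_word);	/* wire order -> host order */
}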
static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
@@ -465,7 +483,7 @@ static __be32 encode_op_hdr(struct xdr_stream *xdr, uint32_t op, __be32 res)
 
 	p = xdr_reserve_space(xdr, 8);
 	if (unlikely(p == NULL))
-		return htonl(NFS4ERR_RESOURCE);
+		return htonl(NFS4ERR_RESOURCE_HDR);
 	*p++ = htonl(op);
 	*p = res;
 	return 0;
@@ -499,10 +517,10 @@ out:
 
 #if defined(CONFIG_NFS_V4_1)
 
-static unsigned encode_sessionid(struct xdr_stream *xdr,
+static __be32 encode_sessionid(struct xdr_stream *xdr,
 				 const struct nfs4_sessionid *sid)
 {
-	uint32_t *p;
+	__be32 *p;
 	int len = NFS4_MAX_SESSIONID_LEN;
 
 	p = xdr_reserve_space(xdr, len);
@@ -513,11 +531,11 @@ static unsigned encode_sessionid(struct xdr_stream *xdr,
 	return 0;
 }
 
-static unsigned encode_cb_sequence_res(struct svc_rqst *rqstp,
+static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp,
 				       struct xdr_stream *xdr,
 				       const struct cb_sequenceres *res)
 {
-	uint32_t *p;
+	__be32 *p;
 	unsigned status = res->csr_status;
 
 	if (unlikely(status != 0))
@@ -554,6 +572,7 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
 	case OP_CB_RECALL:
 	case OP_CB_SEQUENCE:
 	case OP_CB_RECALL_ANY:
+	case OP_CB_RECALL_SLOT:
 		*op = &callback_ops[op_nr];
 		break;
 
@@ -562,7 +581,6 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
 	case OP_CB_NOTIFY:
 	case OP_CB_PUSH_DELEG:
 	case OP_CB_RECALLABLE_OBJ_AVAIL:
-	case OP_CB_RECALL_SLOT:
 	case OP_CB_WANTS_CANCELLED:
 	case OP_CB_NOTIFY_LOCK:
 		return htonl(NFS4ERR_NOTSUPP);
@@ -602,20 +620,18 @@ preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op)
 static __be32 process_op(uint32_t minorversion, int nop,
 		struct svc_rqst *rqstp,
 		struct xdr_stream *xdr_in, void *argp,
-		struct xdr_stream *xdr_out, void *resp)
+		struct xdr_stream *xdr_out, void *resp, int* drc_status)
 {
 	struct callback_op *op = &callback_ops[0];
-	unsigned int op_nr = OP_CB_ILLEGAL;
+	unsigned int op_nr;
 	__be32 status;
 	long maxlen;
 	__be32 res;
 
 	dprintk("%s: start\n", __func__);
 	status = decode_op_hdr(xdr_in, &op_nr);
-	if (unlikely(status)) {
-		status = htonl(NFS4ERR_OP_ILLEGAL);
-		goto out;
-	}
+	if (unlikely(status))
+		return status;
 
 	dprintk("%s: minorversion=%d nop=%d op_nr=%u\n",
 		__func__, minorversion, nop, op_nr);
@@ -624,19 +640,32 @@ static __be32 process_op(uint32_t minorversion, int nop,
 				preprocess_nfs4_op(op_nr, &op);
 	if (status == htonl(NFS4ERR_OP_ILLEGAL))
 		op_nr = OP_CB_ILLEGAL;
-out:
+	if (status)
+		goto encode_hdr;
+
+	if (*drc_status) {
+		status = *drc_status;
+		goto encode_hdr;
+	}
+
 	maxlen = xdr_out->end - xdr_out->p;
 	if (maxlen > 0 && maxlen < PAGE_SIZE) {
-		if (likely(status == 0 && op->decode_args != NULL))
-			status = op->decode_args(rqstp, xdr_in, argp);
-		if (likely(status == 0 && op->process_op != NULL))
+		status = op->decode_args(rqstp, xdr_in, argp);
+		if (likely(status == 0))
 			status = op->process_op(argp, resp);
 	} else
 		status = htonl(NFS4ERR_RESOURCE);
 
+	/* Only set by OP_CB_SEQUENCE processing */
+	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
+		*drc_status = status;
+		status = 0;
+	}
+
+encode_hdr:
 	res = encode_op_hdr(xdr_out, op_nr, status);
-	if (status == 0)
-		status = res;
+	if (unlikely(res))
+		return res;
 	if (op->encode_res != NULL && status == 0)
 		status = op->encode_res(rqstp, xdr_out, resp);
 	dprintk("%s: done, status = %d\n", __func__, ntohl(status));
@@ -652,7 +681,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
 	struct cb_compound_hdr_res hdr_res = { NULL };
 	struct xdr_stream xdr_in, xdr_out;
 	__be32 *p;
-	__be32 status;
+	__be32 status, drc_status = 0;
 	unsigned int nops = 0;
 
 	dprintk("%s: start\n", __func__);
@@ -672,11 +701,18 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
 		return rpc_system_err;
 
 	while (status == 0 && nops != hdr_arg.nops) {
-		status = process_op(hdr_arg.minorversion, nops,
-				    rqstp, &xdr_in, argp, &xdr_out, resp);
+		status = process_op(hdr_arg.minorversion, nops, rqstp,
+				    &xdr_in, argp, &xdr_out, resp, &drc_status);
 		nops++;
 	}
 
+	/* Buffer overflow in decode_ops_hdr or encode_ops_hdr. Return
+	* resource error in cb_compound status without returning op */
+	if (unlikely(status == htonl(NFS4ERR_RESOURCE_HDR))) {
+		status = htonl(NFS4ERR_RESOURCE);
+		nops--;
+	}
+
 	*hdr_res.status = status;
 	*hdr_res.nops = htonl(nops);
 	dprintk("%s: done, status = %u\n", __func__, ntohl(status));
@@ -713,6 +749,11 @@ static struct callback_op callback_ops[] = {
 		.decode_args = (callback_decode_arg_t)decode_recallany_args,
 		.res_maxsize = CB_OP_RECALLANY_RES_MAXSZ,
 	},
+	[OP_CB_RECALL_SLOT] = {
+		.process_op = (callback_process_op_t)nfs4_callback_recallslot,
+		.decode_args = (callback_decode_arg_t)decode_recallslot_args,
+		.res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ,
+	},
 #endif /* CONFIG_NFS_V4_1 */
 };
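Why the private NFS4ERR_RESOURCE_HDR (11050, deliberately outside the protocol's error space): if even the 8-byte op header cannot be decoded or encoded, there is no op to attach an error to, so the sentinel propagates up and nfs4_callback_compound() converts it to NFS4ERR_RESOURCE on the compound status, backing nops off by one. A sketch of that translation (illustrative; EX_-prefixed names are stand-ins):

#include <stdint.h>
#include <arpa/inet.h>

#define EX_NFS4ERR_RESOURCE	10018	/* on-the-wire error */
#define EX_RESOURCE_HDR		11050	/* internal sentinel, never sent */

static uint32_t finish_compound(uint32_t status, unsigned int *nops)
{
	if (status == htonl(EX_RESOURCE_HDR)) {
		status = htonl(EX_NFS4ERR_RESOURCE);	/* report on the compound */
		(*nops)--;	/* the failed op never emitted a header */
	}
	return status;
}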
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ee77713ce68b..2274f1737336 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -164,30 +164,7 @@ error_0:
 	return ERR_PTR(err);
 }
 
-static void nfs4_shutdown_client(struct nfs_client *clp)
-{
-#ifdef CONFIG_NFS_V4
-	if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
-		nfs4_kill_renewd(clp);
-	BUG_ON(!RB_EMPTY_ROOT(&clp->cl_state_owners));
-	if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
-		nfs_idmap_delete(clp);
-
-	rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
-#endif
-}
-
-/*
- * Destroy the NFS4 callback service
- */
-static void nfs4_destroy_callback(struct nfs_client *clp)
-{
 #ifdef CONFIG_NFS_V4
-	if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
-		nfs_callback_down(clp->cl_minorversion);
-#endif /* CONFIG_NFS_V4 */
-}
-
 /*
  * Clears/puts all minor version specific parts from an nfs_client struct
  * reverting it to minorversion 0.
@@ -202,9 +179,33 @@ static void nfs4_clear_client_minor_version(struct nfs_client *clp)
 
 	clp->cl_call_sync = _nfs4_call_sync;
 #endif /* CONFIG_NFS_V4_1 */
+}
 
+/*
+ * Destroy the NFS4 callback service
+ */
+static void nfs4_destroy_callback(struct nfs_client *clp)
+{
+	if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
+		nfs_callback_down(clp->cl_minorversion);
+}
+
+static void nfs4_shutdown_client(struct nfs_client *clp)
+{
+	if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
+		nfs4_kill_renewd(clp);
+	nfs4_clear_client_minor_version(clp);
 	nfs4_destroy_callback(clp);
+	if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
+		nfs_idmap_delete(clp);
+
+	rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
 }
+#else
+static void nfs4_shutdown_client(struct nfs_client *clp)
+{
+}
+#endif /* CONFIG_NFS_V4 */
 
 /*
  * Destroy a shared client record
@@ -213,7 +214,6 @@ static void nfs_free_client(struct nfs_client *clp)
 {
 	dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version);
 
-	nfs4_clear_client_minor_version(clp);
 	nfs4_shutdown_client(clp);
 
 	nfs_fscache_release_client_cookie(clp);
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index 95e1ca765d47..3f0cd4dfddaf 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -36,6 +36,19 @@ struct nfs_dns_ent {
 };
 
 
+static void nfs_dns_ent_update(struct cache_head *cnew,
+		struct cache_head *ckey)
+{
+	struct nfs_dns_ent *new;
+	struct nfs_dns_ent *key;
+
+	new = container_of(cnew, struct nfs_dns_ent, h);
+	key = container_of(ckey, struct nfs_dns_ent, h);
+
+	memcpy(&new->addr, &key->addr, key->addrlen);
+	new->addrlen = key->addrlen;
+}
+
 static void nfs_dns_ent_init(struct cache_head *cnew,
 		struct cache_head *ckey)
 {
@@ -49,8 +62,7 @@ static void nfs_dns_ent_init(struct cache_head *cnew,
 
 	new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL);
 	if (new->hostname) {
 		new->namelen = key->namelen;
-		memcpy(&new->addr, &key->addr, key->addrlen);
-		new->addrlen = key->addrlen;
+		nfs_dns_ent_update(cnew, ckey);
 	} else {
 		new->namelen = 0;
 		new->addrlen = 0;
@@ -234,7 +246,7 @@ static struct cache_detail nfs_dns_resolve = {
 	.cache_show = nfs_dns_show,
 	.match = nfs_dns_match,
 	.init = nfs_dns_ent_init,
-	.update = nfs_dns_ent_init,
+	.update = nfs_dns_ent_update,
 	.alloc = nfs_dns_ent_alloc,
 };
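The dns_resolve fix separates the cache's init and update callbacks: .update used to point back at nfs_dns_ent_init(), so every cache refresh duplicated the hostname again (losing the old copy); now update only refreshes the address. A simplified sketch of the contract (userspace types, not the sunrpc cache API):

#include <string.h>
#include <sys/socket.h>

struct dns_ent {
	char *hostname;			/* owned, set once at init */
	size_t namelen;
	struct sockaddr_storage addr;	/* volatile, refreshed on update */
	size_t addrlen;
};

static void ent_update(struct dns_ent *new, const struct dns_ent *key)
{
	memcpy(&new->addr, &key->addr, key->addrlen);	/* volatile part only */
	new->addrlen = key->addrlen;
}

static void ent_init(struct dns_ent *new, const struct dns_ent *key)
{
	new->hostname = strndup(key->hostname, key->namelen);
	if (new->hostname) {
		new->namelen = key->namelen;
		ent_update(new, key);	/* shared tail: copy the address */
	}
}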
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 63f2071d6445..ae8d02294e46 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -123,11 +123,11 @@ nfs_file_open(struct inode *inode, struct file *filp)
 			filp->f_path.dentry->d_parent->d_name.name,
 			filp->f_path.dentry->d_name.name);
 
+	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
 	res = nfs_check_flags(filp->f_flags);
 	if (res)
 		return res;
 
-	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
 	res = nfs_open(inode, filp);
 	return res;
 }
@@ -237,9 +237,9 @@ nfs_file_flush(struct file *file, fl_owner_t id)
 			dentry->d_parent->d_name.name,
 			dentry->d_name.name);
 
+	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
 	if ((file->f_mode & FMODE_WRITE) == 0)
 		return 0;
-	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
 
 	/* Flush writes to the server and return any errors */
 	return nfs_do_fsync(ctx, inode);
@@ -262,9 +262,11 @@ nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
 		(unsigned long) count, (unsigned long) pos);
 
 	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
-	nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, count);
-	if (!result)
+	if (!result) {
 		result = generic_file_aio_read(iocb, iov, nr_segs, pos);
+		if (result > 0)
+			nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
+	}
 	return result;
 }
@@ -282,8 +284,11 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos,
 		(unsigned long) count, (unsigned long long) *ppos);
 
 	res = nfs_revalidate_mapping(inode, filp->f_mapping);
-	if (!res)
+	if (!res) {
 		res = generic_file_splice_read(filp, ppos, pipe, count, flags);
+		if (res > 0)
+			nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, res);
+	}
 	return res;
 }
@@ -596,6 +601,7 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
 {
 	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
 	struct inode * inode = dentry->d_inode;
+	unsigned long written = 0;
 	ssize_t result;
 	size_t count = iov_length(iov, nr_segs);
 
@@ -622,14 +628,18 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
 	if (!count)
 		goto out;
 
-	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
 	result = generic_file_aio_write(iocb, iov, nr_segs, pos);
+	if (result > 0)
+		written = result;
+
 	/* Return error values for O_DSYNC and IS_SYNC() */
 	if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
 		int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode);
 		if (err < 0)
 			result = err;
 	}
+	if (result > 0)
+		nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
 out:
 	return result;
 
@@ -644,6 +654,7 @@ static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
 {
 	struct dentry *dentry = filp->f_path.dentry;
 	struct inode *inode = dentry->d_inode;
+	unsigned long written = 0;
 	ssize_t ret;
 
 	dprintk("NFS splice_write(%s/%s, %lu@%llu)\n",
@@ -654,14 +665,17 @@ static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
 	 * The combination of splice and an O_APPEND destination is disallowed.
 	 */
 
-	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
-
 	ret = generic_file_splice_write(pipe, filp, ppos, count, flags);
+	if (ret > 0)
+		written = ret;
+
 	if (ret >= 0 && nfs_need_sync_write(filp, inode)) {
 		int err = nfs_do_fsync(nfs_file_open_context(filp), inode);
 		if (err < 0)
 			ret = err;
 	}
+	if (ret > 0)
+		nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
 	return ret;
 }
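The common thread in the file.c hunks: NFSIOS_NORMALREADBYTES/NFSIOS_NORMALWRITTENBYTES now count bytes actually transferred rather than bytes requested, and the write paths stash the short-write count before a trailing fsync can replace the return value with an error. The shape, reduced to a sketch (illustrative only):

#include <sys/types.h>

/* wret: bytes from the write; sync_err: result of the O_DSYNC flush. */
static ssize_t account_write(ssize_t wret, int sync_err, unsigned long *stat)
{
	unsigned long written = wret > 0 ? (unsigned long)wret : 0;

	if (wret >= 0 && sync_err < 0)
		wret = sync_err;	/* the flush error wins */
	if (wret > 0)
		*stat += written;	/* count only completed bytes */
	return wret;
}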
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index dbaaf7d2a188..657201acda84 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -595,11 +595,6 @@ void put_nfs_open_context(struct nfs_open_context *ctx)
 	__put_nfs_open_context(ctx, 0);
 }
 
-static void put_nfs_open_context_sync(struct nfs_open_context *ctx)
-{
-	__put_nfs_open_context(ctx, 1);
-}
-
 /*
  * Ensure that mmap has a recent RPC credential for use when writing out
  * shared pages
@@ -646,7 +641,7 @@ static void nfs_file_clear_open_context(struct file *filp)
 		spin_lock(&inode->i_lock);
 		list_move_tail(&ctx->list, &NFS_I(inode)->open_files);
 		spin_unlock(&inode->i_lock);
-		put_nfs_open_context_sync(ctx);
+		__put_nfs_open_context(ctx, filp->f_flags & O_DIRECT ? 0 : 1);
 	}
 }
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 3f8881d1a050..24992f0a29f2 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -22,14 +22,14 @@
 
 #define NFSDBG_FACILITY		NFSDBG_PROC
 
-/* A wrapper to handle the EJUKEBOX error message */
+/* A wrapper to handle the EJUKEBOX and EKEYEXPIRED error messages */
 static int
 nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 {
 	int res;
 	do {
 		res = rpc_call_sync(clnt, msg, flags);
-		if (res != -EJUKEBOX)
+		if (res != -EJUKEBOX && res != -EKEYEXPIRED)
 			break;
 		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
@@ -42,9 +42,10 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 static int
 nfs3_async_handle_jukebox(struct rpc_task *task, struct inode *inode)
 {
-	if (task->tk_status != -EJUKEBOX)
+	if (task->tk_status != -EJUKEBOX && task->tk_status != -EKEYEXPIRED)
 		return 0;
-	nfs_inc_stats(inode, NFSIOS_DELAY);
+	if (task->tk_status == -EJUKEBOX)
+		nfs_inc_stats(inode, NFSIOS_DELAY);
 	task->tk_status = 0;
 	rpc_restart_call(task);
 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 0c6fda33d66e..a187200a7aac 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -46,6 +46,7 @@ enum nfs4_client_state {
 	NFS4CLNT_DELEGRETURN,
 	NFS4CLNT_SESSION_RESET,
 	NFS4CLNT_SESSION_DRAINING,
+	NFS4CLNT_RECALL_SLOT,
 };
 
 /*
@@ -280,6 +281,7 @@ extern void nfs4_schedule_state_manager(struct nfs_client *);
 extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
 extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state);
 extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
+extern void nfs41_handle_recall_slot(struct nfs_client *clp);
 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
 extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
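-EKEYEXPIRED (typically a krb5 TGT running out) now gets the EJUKEBOX treatment on the synchronous path: sleep killably, retry, and give up only on a fatal signal. A userspace-flavored sketch of the retry shape (fatal_signal() is a stand-in for the kernel's fatal_signal_pending(current)):

#include <unistd.h>
#include <errno.h>
#include <stdbool.h>

static bool fatal_signal(void) { return false; }	/* stand-in only */

static int call_with_retry(int (*call)(void), int retriable, unsigned delay_s)
{
	int res;
	do {
		res = call();
		if (res != retriable)
			break;
		sleep(delay_s);		/* kernel: schedule_timeout_killable() */
		res = -EINTR;		/* kernel uses -ERESTARTSYS */
	} while (!fatal_signal());
	return res;
}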
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 84d83be25a98..eda74c42d552 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -281,6 +281,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
 			}
 		case -NFS4ERR_GRACE:
 		case -NFS4ERR_DELAY:
+		case -EKEYEXPIRED:
 			ret = nfs4_delay(server->client, &exception->timeout);
 			if (ret != 0)
 				break;
@@ -418,7 +419,8 @@ static void nfs41_sequence_done(struct nfs_client *clp,
 			clp->cl_last_renewal = timestamp;
 		spin_unlock(&clp->cl_lock);
 		/* Check sequence flags */
-		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
+		if (atomic_read(&clp->cl_count) > 1)
+			nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
 	}
 out:
 	/* The session may be reset by one of the error handlers. */
@@ -1163,7 +1165,7 @@ static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state
 	int err;
 	do {
 		err = _nfs4_do_open_reclaim(ctx, state);
-		if (err != -NFS4ERR_DELAY)
+		if (err != -NFS4ERR_DELAY && err != -EKEYEXPIRED)
 			break;
 		nfs4_handle_exception(server, err, &exception);
 	} while (exception.retry);
@@ -1582,6 +1584,7 @@ static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state
 			goto out;
 		case -NFS4ERR_GRACE:
 		case -NFS4ERR_DELAY:
+		case -EKEYEXPIRED:
 			nfs4_handle_exception(server, err, &exception);
 			err = 0;
 		}
@@ -3145,10 +3148,19 @@ static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_messa
  * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
  * standalone procedure for queueing an asynchronous RENEW.
  */
+static void nfs4_renew_release(void *data)
+{
+	struct nfs_client *clp = data;
+
+	if (atomic_read(&clp->cl_count) > 1)
+		nfs4_schedule_state_renewal(clp);
+	nfs_put_client(clp);
+}
+
 static void nfs4_renew_done(struct rpc_task *task, void *data)
 {
-	struct nfs_client *clp = (struct nfs_client *)task->tk_msg.rpc_argp;
-	unsigned long timestamp = (unsigned long)data;
+	struct nfs_client *clp = data;
+	unsigned long timestamp = task->tk_start;
 
 	if (task->tk_status < 0) {
 		/* Unless we're shutting down, schedule state recovery! */
@@ -3164,6 +3176,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *data)
 
 static const struct rpc_call_ops nfs4_renew_ops = {
 	.rpc_call_done = nfs4_renew_done,
+	.rpc_release = nfs4_renew_release,
 };
 
 int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
@@ -3174,8 +3187,10 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
 		.rpc_cred	= cred,
 	};
 
+	if (!atomic_inc_not_zero(&clp->cl_count))
+		return -EIO;
 	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
-			&nfs4_renew_ops, (void *)jiffies);
+			&nfs4_renew_ops, clp);
 }
 
 int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
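Both RENEW here and SEQUENCE further down now pin the nfs_client for the lifetime of the async task: atomic_inc_not_zero() refuses to resurrect a client that is already being torn down, and the new .rpc_release callback drops the pin, re-arming the next renewal only if someone else still holds a reference. The pattern, reduced (names and the queueing API are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

struct obj { atomic_int refs; };

static bool get_unless_zero(struct obj *o)
{
	int r = atomic_load(&o->refs);
	while (r != 0)
		if (atomic_compare_exchange_weak(&o->refs, &r, r + 1))
			return true;	/* pinned: safe to queue the task */
	return false;			/* already dying: do not queue */
}

static void task_release(struct obj *o, void (*rearm)(struct obj *),
			 void (*put)(struct obj *))
{
	if (atomic_load(&o->refs) > 1)
		rearm(o);	/* others still hold it: schedule the next run */
	put(o);			/* drop the task's pin */
}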
@@ -3452,6 +3467,7 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
 			if (server)
 				nfs_inc_server_stats(server, NFSIOS_DELAY);
 		case -NFS4ERR_GRACE:
+		case -EKEYEXPIRED:
 			rpc_delay(task, NFS4_POLL_RETRY_MAX);
 			task->tk_status = 0;
 			return -EAGAIN;
@@ -3564,6 +3580,7 @@ int nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cred *cred)
 			case -NFS4ERR_RESOURCE:
 				/* The IBM lawyers misread another document! */
 			case -NFS4ERR_DELAY:
+			case -EKEYEXPIRED:
 				err = nfs4_delay(clp->cl_rpcclient, &timeout);
 		}
 	} while (err == 0);
@@ -4179,7 +4196,7 @@ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request
 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
 			return 0;
 		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
-		if (err != -NFS4ERR_DELAY)
+		if (err != -NFS4ERR_DELAY && err != -EKEYEXPIRED)
 			break;
 		nfs4_handle_exception(server, err, &exception);
 	} while (exception.retry);
@@ -4204,6 +4221,7 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request
 			goto out;
 		case -NFS4ERR_GRACE:
 		case -NFS4ERR_DELAY:
+		case -EKEYEXPIRED:
 			nfs4_handle_exception(server, err, &exception);
 			err = 0;
 		}
@@ -4355,6 +4373,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
 				err = 0;
 				goto out;
 			case -NFS4ERR_DELAY:
+			case -EKEYEXPIRED:
 				break;
 		}
 		err = nfs4_handle_exception(server, err, &exception);
@@ -4500,7 +4519,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
 
 		status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
 
-		if (status != NFS4ERR_CLID_INUSE)
+		if (status != -NFS4ERR_CLID_INUSE)
 			break;
 
 		if (signalled())
@@ -4554,6 +4573,7 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
 	switch (task->tk_status) {
 	case -NFS4ERR_DELAY:
 	case -NFS4ERR_GRACE:
+	case -EKEYEXPIRED:
 		dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
 		rpc_delay(task, NFS4_POLL_RETRY_MIN);
 		task->tk_status = 0;
@@ -4611,26 +4631,32 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
 /*
  * Reset a slot table
  */
-static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, int max_slots,
-		int old_max_slots, int ivalue)
+static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
+				 int ivalue)
 {
+	struct nfs4_slot *new = NULL;
 	int i;
 	int ret = 0;
 
-	dprintk("--> %s: max_reqs=%u, tbl %p\n", __func__, max_slots, tbl);
+	dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
+		max_reqs, tbl->max_slots);
 
-	/*
-	 * Until we have dynamic slot table adjustment, insist
-	 * upon the same slot table size
-	 */
-	if (max_slots != old_max_slots) {
-		dprintk("%s reset slot table does't match old\n",
-			__func__);
-		ret = -EINVAL; /*XXX NFS4ERR_REQ_TOO_BIG ? */
-		goto out;
+	/* Does the newly negotiated max_reqs match the existing slot table? */
+	if (max_reqs != tbl->max_slots) {
+		ret = -ENOMEM;
+		new = kmalloc(max_reqs * sizeof(struct nfs4_slot),
+			      GFP_KERNEL);
+		if (!new)
+			goto out;
+		ret = 0;
+		kfree(tbl->slots);
 	}
 	spin_lock(&tbl->slot_tbl_lock);
-	for (i = 0; i < max_slots; ++i)
+	if (new) {
+		tbl->slots = new;
+		tbl->max_slots = max_reqs;
+	}
+	for (i = 0; i < tbl->max_slots; ++i)
 		tbl->slots[i].seq_nr = ivalue;
 	spin_unlock(&tbl->slot_tbl_lock);
 	dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
@@ -4648,16 +4674,12 @@ static int nfs4_reset_slot_tables(struct nfs4_session *session)
 	int status;
 
 	status = nfs4_reset_slot_table(&session->fc_slot_table,
-			session->fc_attrs.max_reqs,
-			session->fc_slot_table.max_slots,
-			1);
+			session->fc_attrs.max_reqs, 1);
 	if (status)
 		return status;
 
 	status = nfs4_reset_slot_table(&session->bc_slot_table,
-			session->bc_attrs.max_reqs,
-			session->bc_slot_table.max_slots,
-			0);
+			session->bc_attrs.max_reqs, 0);
 	return status;
 }
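nfs4_reset_slot_table() can now grow or shrink the table instead of demanding an identical size: allocate outside the lock, swap and reinitialize under it (seq_nr starts at 1 for the fore channel, 0 for the back channel). A reduced sketch -- note the kernel frees the old array before taking the lock, while the sketch folds the free into the swap for brevity:

#include <stdint.h>
#include <stdlib.h>
#include <errno.h>

struct slot { uint32_t seq_nr; };
struct slot_table { struct slot *slots; uint32_t max_slots; };

static int reset_slot_table(struct slot_table *tbl, uint32_t max_reqs,
			    uint32_t ivalue)
{
	struct slot *new = NULL;

	if (max_reqs != tbl->max_slots) {
		new = calloc(max_reqs, sizeof(*new));	/* outside the lock */
		if (!new)
			return -ENOMEM;
	}
	/* spin_lock(&tbl->lock); */
	if (new) {
		free(tbl->slots);
		tbl->slots = new;
		tbl->max_slots = max_reqs;
	}
	for (uint32_t i = 0; i < tbl->max_slots; i++)
		tbl->slots[i].seq_nr = ivalue;
	/* spin_unlock(&tbl->lock); */
	return 0;
}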
@@ -4798,16 +4820,14 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
 	args->fc_attrs.headerpadsz = 0;
 	args->fc_attrs.max_rqst_sz = mxrqst_sz;
 	args->fc_attrs.max_resp_sz = mxresp_sz;
-	args->fc_attrs.max_resp_sz_cached = mxresp_sz;
 	args->fc_attrs.max_ops = NFS4_MAX_OPS;
 	args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs;
 
 	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
-		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
+		"max_ops=%u max_reqs=%u\n",
 		__func__,
 		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
-		args->fc_attrs.max_resp_sz_cached, args->fc_attrs.max_ops,
-		args->fc_attrs.max_reqs);
+		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
 
 	/* Back channel attributes */
 	args->bc_attrs.headerpadsz = 0;
@@ -5016,7 +5036,16 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
 				       &res, args.sa_cache_this, 1);
 }
 
-void nfs41_sequence_call_done(struct rpc_task *task, void *data)
+static void nfs41_sequence_release(void *data)
+{
+	struct nfs_client *clp = (struct nfs_client *)data;
+
+	if (atomic_read(&clp->cl_count) > 1)
+		nfs4_schedule_state_renewal(clp);
+	nfs_put_client(clp);
+}
+
+static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
 {
 	struct nfs_client *clp = (struct nfs_client *)data;
 
@@ -5024,6 +5053,8 @@ void nfs41_sequence_call_done(struct rpc_task *task, void *data)
 
 	if (task->tk_status < 0) {
 		dprintk("%s ERROR %d\n", __func__, task->tk_status);
+		if (atomic_read(&clp->cl_count) == 1)
+			goto out;
 
 		if (_nfs4_async_handle_error(task, NULL, clp, NULL)
 								== -EAGAIN) {
@@ -5032,7 +5063,7 @@ void nfs41_sequence_call_done(struct rpc_task *task, void *data)
 		}
 	}
 	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
-
+out:
 	kfree(task->tk_msg.rpc_argp);
 	kfree(task->tk_msg.rpc_resp);
 
@@ -5057,6 +5088,7 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
 static const struct rpc_call_ops nfs41_sequence_ops = {
 	.rpc_call_done = nfs41_sequence_call_done,
 	.rpc_call_prepare = nfs41_sequence_prepare,
+	.rpc_release = nfs41_sequence_release,
 };
 
 static int nfs41_proc_async_sequence(struct nfs_client *clp,
@@ -5069,12 +5101,13 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp,
 		.rpc_cred = cred,
 	};
 
+	if (!atomic_inc_not_zero(&clp->cl_count))
+		return -EIO;
 	args = kzalloc(sizeof(*args), GFP_KERNEL);
-	if (!args)
-		return -ENOMEM;
 	res = kzalloc(sizeof(*res), GFP_KERNEL);
-	if (!res) {
+	if (!args || !res) {
 		kfree(args);
+		nfs_put_client(clp);
 		return -ENOMEM;
 	}
 	res->sr_slotid = NFS4_MAX_SLOT_TABLE;
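The async SEQUENCE setup now takes the client reference first and uses one joint error path for both allocations; kfree(NULL) being a no-op is what keeps the single exit clean. The idiom, reduced to a sketch (illustrative sizes and names):

#include <stdlib.h>
#include <errno.h>

static int setup_two(void **a_out, void **b_out)
{
	void *a = calloc(1, 64);
	void *b = calloc(1, 64);

	if (!a || !b) {		/* one test covers both allocations */
		free(a);	/* free(NULL) is defined to do nothing */
		free(b);
		return -ENOMEM;	/* also the spot to drop any pinned ref */
	}
	*a_out = a;
	*b_out = b;
	return 0;
}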
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 0156c01c212c..d87f10327b72 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -36,11 +36,6 @@
  * as an rpc_task, not a real kernel thread, so it always runs in rpciod's
  * context.  There is one renewd per nfs_server.
  *
- * TODO: If the send queue gets backlogged (e.g., if the server goes down),
- * we will keep filling the queue with periodic RENEW requests.  We need a
- * mechanism for ensuring that if renewd successfully sends off a request,
- * then it only wakes up when the request is finished.  Maybe use the
- * child task framework of the RPC layer?
  */
 
 #include <linux/mm.h>
@@ -63,7 +58,7 @@ nfs4_renew_state(struct work_struct *work)
 	struct nfs_client *clp =
 		container_of(work, struct nfs_client, cl_renewd.work);
 	struct rpc_cred *cred;
-	long lease, timeout;
+	long lease;
 	unsigned long last, now;
 
 	ops = nfs4_state_renewal_ops[clp->cl_minorversion];
@@ -75,7 +70,6 @@ nfs4_renew_state(struct work_struct *work)
 	lease = clp->cl_lease_time;
 	last = clp->cl_last_renewal;
 	now = jiffies;
-	timeout = (2 * lease) / 3 + (long)last - (long)now;
 	/* Are we close to a lease timeout? */
 	if (time_after(now, last + lease/3)) {
 		cred = ops->get_state_renewal_cred_locked(clp);
@@ -90,19 +84,15 @@ nfs4_renew_state(struct work_struct *work)
 			/* Queue an asynchronous RENEW. */
 			ops->sched_state_renewal(clp, cred);
 			put_rpccred(cred);
+			goto out_exp;
 		}
-		timeout = (2 * lease) / 3;
-		spin_lock(&clp->cl_lock);
-	} else
+	} else {
 		dprintk("%s: failed to call renewd. Reason: lease not expired \n",
 				__func__);
-	if (timeout < 5 * HZ)    /* safeguard */
-		timeout = 5 * HZ;
-	dprintk("%s: requeueing work. Lease period = %ld\n",
-			__func__, (timeout + HZ - 1) / HZ);
-	cancel_delayed_work(&clp->cl_renewd);
-	schedule_delayed_work(&clp->cl_renewd, timeout);
-	spin_unlock(&clp->cl_lock);
+		spin_unlock(&clp->cl_lock);
+	}
+	nfs4_schedule_state_renewal(clp);
+out_exp:
 	nfs_expire_unreferenced_delegations(clp);
 out:
 	dprintk("%s: done\n", __func__);
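The renewal policy survives the rewrite: renew once a third of the lease has elapsed, so further attempts still fit before the server may drop state. The predicate, sketched in the wraparound-safe time_after() shape (jiffies-style unsigned arithmetic):

#include <stdbool.h>

/* Wraparound-safe: is `now` past last + lease/3 ? */
static bool lease_needs_renewal(unsigned long now, unsigned long last,
				unsigned long lease)
{
	return (long)(now - (last + lease / 3)) > 0;
}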
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index c1e2733f4fa4..6c5ed51f105e 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1249,26 +1249,65 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
 }
 
 #ifdef CONFIG_NFS_V4_1
+void nfs41_handle_recall_slot(struct nfs_client *clp)
+{
+	set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
+	nfs4_schedule_state_recovery(clp);
+}
+
+static void nfs4_reset_all_state(struct nfs_client *clp)
+{
+	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
+		clp->cl_boot_time = CURRENT_TIME;
+		nfs4_state_start_reclaim_nograce(clp);
+		nfs4_schedule_state_recovery(clp);
+	}
+}
+
+static void nfs41_handle_server_reboot(struct nfs_client *clp)
+{
+	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
+		nfs4_state_start_reclaim_reboot(clp);
+		nfs4_schedule_state_recovery(clp);
+	}
+}
+
+static void nfs41_handle_state_revoked(struct nfs_client *clp)
+{
+	/* Temporary */
+	nfs4_reset_all_state(clp);
+}
+
+static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
+{
+	/* This will need to handle layouts too */
+	nfs_expire_all_delegations(clp);
+}
+
+static void nfs41_handle_cb_path_down(struct nfs_client *clp)
+{
+	nfs_expire_all_delegations(clp);
+	if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
+		nfs4_schedule_state_recovery(clp);
+}
+
 void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
 {
 	if (!flags)
 		return;
-	else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) {
-		set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
-		nfs4_state_start_reclaim_reboot(clp);
-		nfs4_schedule_state_recovery(clp);
-	} else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+	else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
+		nfs41_handle_server_reboot(clp);
+	else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
 			    SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
 			    SEQ4_STATUS_ADMIN_STATE_REVOKED |
-			    SEQ4_STATUS_RECALLABLE_STATE_REVOKED |
-			    SEQ4_STATUS_LEASE_MOVED)) {
-		set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
-		nfs4_state_start_reclaim_nograce(clp);
-		nfs4_schedule_state_recovery(clp);
-	} else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+			    SEQ4_STATUS_LEASE_MOVED))
+		nfs41_handle_state_revoked(clp);
+	else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
+		nfs41_handle_recallable_state_revoked(clp);
+	else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
 			    SEQ4_STATUS_BACKCHANNEL_FAULT |
 			    SEQ4_STATUS_CB_PATH_DOWN_SESSION))
-		nfs_expire_all_delegations(clp);
+		nfs41_handle_cb_path_down(clp);
 }
 
 static int nfs4_reset_session(struct nfs_client *clp)
@@ -1285,23 +1324,52 @@ static int nfs4_reset_session(struct nfs_client *clp)
 	memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
 	status = nfs4_proc_create_session(clp);
-	if (status)
+	if (status) {
 		status = nfs4_recovery_handle_error(clp, status);
+		goto out;
+	}
+	/* create_session negotiated new slot table */
+	clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
 
-out:
-	/*
-	 * Let the state manager reestablish state
-	 */
-	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
-	    status == 0)
+	 /* Let the state manager reestablish state */
+	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
 		nfs41_setup_state_renewal(clp);
-
+out:
 	return status;
 }
 
+static int nfs4_recall_slot(struct nfs_client *clp)
+{
+	struct nfs4_slot_table *fc_tbl = &clp->cl_session->fc_slot_table;
+	struct nfs4_channel_attrs *fc_attrs = &clp->cl_session->fc_attrs;
+	struct nfs4_slot *new, *old;
+	int i;
+
+	nfs4_begin_drain_session(clp);
+	new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
+		      GFP_KERNEL);
+        if (!new)
+		return -ENOMEM;
+
+	spin_lock(&fc_tbl->slot_tbl_lock);
+	for (i = 0; i < fc_tbl->target_max_slots; i++)
+		new[i].seq_nr = fc_tbl->slots[i].seq_nr;
+	old = fc_tbl->slots;
+	fc_tbl->slots = new;
+	fc_tbl->max_slots = fc_tbl->target_max_slots;
+	fc_tbl->target_max_slots = 0;
+	fc_attrs->max_reqs = fc_tbl->max_slots;
+	spin_unlock(&fc_tbl->slot_tbl_lock);
+
+	kfree(old);
+	nfs4_end_drain_session(clp);
+	return 0;
+}
+
 #else /* CONFIG_NFS_V4_1 */
 static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
 static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
+static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
 #endif /* CONFIG_NFS_V4_1 */
 
 /* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
@@ -1314,6 +1382,7 @@ static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
 		case -NFS4ERR_DELAY:
 		case -NFS4ERR_CLID_INUSE:
 		case -EAGAIN:
+		case -EKEYEXPIRED:
 			break;
 
 		case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
@@ -1397,6 +1466,15 @@ static void nfs4_state_manager(struct nfs_client *clp)
 			nfs_client_return_marked_delegations(clp);
 			continue;
 		}
+		/* Recall session slots */
+		if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state)
+		   && nfs4_has_session(clp)) {
+			status = nfs4_recall_slot(clp);
+			if (status < 0)
+				goto out_error;
+			continue;
+		}
+
 		nfs4_clear_state_manager_bit(clp);
 
 		/* Did we race with an attempt to give us more work? */
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 5cd5184b56db..4d338be492cb 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1578,6 +1578,14 @@ static void encode_create_session(struct xdr_stream *xdr,
 	char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
 	uint32_t len;
 	struct nfs_client *clp = args->client;
+	u32 max_resp_sz_cached;
+
+	/*
+	 * Assumes OPEN is the biggest non-idempotent compound.
+	 * 2 is the verifier.
+	 */
+	max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE +
+			      RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT;
 
 	len = scnprintf(machine_name, sizeof(machine_name), "%s",
 			clp->cl_ipaddr);
@@ -1592,7 +1600,7 @@ static void encode_create_session(struct xdr_stream *xdr,
 	*p++ = cpu_to_be32(args->fc_attrs.headerpadsz);	/* header padding size */
 	*p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz);	/* max req size */
 	*p++ = cpu_to_be32(args->fc_attrs.max_resp_sz);	/* max resp size */
-	*p++ = cpu_to_be32(args->fc_attrs.max_resp_sz_cached);	/* Max resp sz cached */
+	*p++ = cpu_to_be32(max_resp_sz_cached);		/* Max resp sz cached */
 	*p++ = cpu_to_be32(args->fc_attrs.max_ops);	/* max operations */
 	*p++ = cpu_to_be32(args->fc_attrs.max_reqs);	/* max requests */
 	*p++ = cpu_to_be32(0);				/* rdmachannel_attrs */
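The CREATE_SESSION ca_maxresponsesize_cached value is now derived instead of echoing max_resp_sz: the constituent sizes are counted in 4-byte XDR words, so the byte value is (OPEN reply words + RPC reply header + auth + 2 verifier words) * 4, on the patch's stated assumption that OPEN is the biggest non-idempotent compound. The shape of the computation, with the real constants left as parameters (they live in nfs4xdr.c and the RPC headers):

#include <stdint.h>

#define XDR_UNIT	4	/* bytes per XDR word */

static uint32_t max_resp_sz_cached(uint32_t dec_open_words,
				   uint32_t rep_hdr_words,
				   uint32_t max_auth_words)
{
	/* +2 words for the verifier */
	return (dec_open_words + rep_hdr_words + max_auth_words + 2) * XDR_UNIT;
}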
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index ef583854d8d0..c752d944fe9e 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -47,6 +47,39 @@
 #define NFSDBG_FACILITY		NFSDBG_PROC
 
 /*
+ * wrapper to handle the -EKEYEXPIRED error message. This should generally
+ * only happen if using krb5 auth and a user's TGT expires. NFSv2 doesn't
+ * support the NFSERR_JUKEBOX error code, but we handle this situation in the
+ * same way that we handle that error with NFSv3.
+ */
+static int
+nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
+{
+	int res;
+	do {
+		res = rpc_call_sync(clnt, msg, flags);
+		if (res != -EKEYEXPIRED)
+			break;
+		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+		res = -ERESTARTSYS;
+	} while (!fatal_signal_pending(current));
+	return res;
+}
+
+#define rpc_call_sync(clnt, msg, flags)	nfs_rpc_wrapper(clnt, msg, flags)
+
+static int
+nfs_async_handle_expired_key(struct rpc_task *task)
+{
+	if (task->tk_status != -EKEYEXPIRED)
+		return 0;
+	task->tk_status = 0;
+	rpc_restart_call(task);
+	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
+	return 1;
+}
+
+/*
  * Bare-bones access to getattr: this is for nfs_read_super.
  */
 static int
@@ -307,6 +340,8 @@ nfs_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
 
 static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir)
 {
+	if (nfs_async_handle_expired_key(task))
+		return 0;
 	nfs_mark_for_revalidate(dir);
 	return 1;
 }
@@ -560,6 +595,9 @@ nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
 
 static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
 {
+	if (nfs_async_handle_expired_key(task))
+		return -EAGAIN;
+
 	nfs_invalidate_atime(data->inode);
 	if (task->tk_status >= 0) {
 		nfs_refresh_inode(data->inode, data->res.fattr);
@@ -579,6 +617,9 @@ static void nfs_proc_read_setup(struct nfs_read_data *data, struct rpc_message *
 
 static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
 {
+	if (nfs_async_handle_expired_key(task))
+		return -EAGAIN;
+
 	if (task->tk_status >= 0)
 		nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr);
 	return 0;
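One idiom in the proc.c hunk is easy to miss: after nfs_rpc_wrapper() is defined, the #define reroutes every later rpc_call_sync() in this file through the retry loop, while the wrapper itself was compiled before the macro existed and so still reaches the real function. Demonstrated in miniature (standalone C, illustrative names):

#include <stdio.h>

static int real_call(const char *what)
{
	printf("real call: %s\n", what);
	return 0;
}

static int wrapper(const char *what)
{
	/* compiled before the macro below, so this is the real function */
	return real_call(what);
}

#define real_call(what) wrapper(what)	/* later uses go through wrapper() */

int main(void)
{
	return real_call("GETATTR");	/* expands to wrapper("GETATTR") */
}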
