-rw-r--r--  fs/locks.c                                 |  6
-rw-r--r--  fs/nfsd/nfs3xdr.c                          | 31
-rw-r--r--  fs/nfsd/nfs4proc.c                         |  9
-rw-r--r--  fs/nfsd/nfs4state.c                        |  6
-rw-r--r--  fs/nfsd/nfs4xdr.c                          | 16
-rw-r--r--  fs/nfsd/nfsfh.h                            | 28
-rw-r--r--  fs/nfsd/nfsxdr.c                           |  1
-rw-r--r--  include/linux/sunrpc/svc_rdma.h            |  2
-rw-r--r--  net/sunrpc/svcsock.c                       | 14
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_backchannel.c |  5
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c    |  9
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_rw.c          | 12
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c      |  6
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c   | 25
14 files changed, 73 insertions, 97 deletions
diff --git a/fs/locks.c b/fs/locks.c
index 21b4dfa289ee..d6ff4beb70ce 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1554,9 +1554,9 @@ out:
 EXPORT_SYMBOL(__break_lease);
 
 /**
- *	lease_get_mtime - get the last modified time of an inode
+ *	lease_get_mtime - update modified time of an inode with exclusive lease
  *	@inode: the inode
- *      @time:  pointer to a timespec which will contain the last modified time
+ *      @time:  pointer to a timespec which contains the last modified time
  *
  * This is to force NFS clients to flush their caches for files with
  * exclusive leases.  The justification is that if someone has an
@@ -1580,8 +1580,6 @@ void lease_get_mtime(struct inode *inode, struct timespec *time)
 
 	if (has_lease)
 		*time = current_time(inode);
-	else
-		*time = inode->i_mtime;
 }
 EXPORT_SYMBOL(lease_get_mtime);
 
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 2758480555fa..1a70581e1cb2 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -251,6 +251,34 @@ encode_wcc_data(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
 }
 
 /*
+ * Fill in the pre_op attr for the wcc data
+ */
+void fill_pre_wcc(struct svc_fh *fhp)
+{
+	struct inode    *inode;
+	struct kstat	stat;
+	__be32 err;
+
+	if (fhp->fh_pre_saved)
+		return;
+
+	inode = d_inode(fhp->fh_dentry);
+	err = fh_getattr(fhp, &stat);
+	if (err) {
+		/* Grab the times from inode anyway */
+		stat.mtime = inode->i_mtime;
+		stat.ctime = inode->i_ctime;
+		stat.size  = inode->i_size;
+	}
+
+	fhp->fh_pre_mtime = stat.mtime;
+	fhp->fh_pre_ctime = stat.ctime;
+	fhp->fh_pre_size  = stat.size;
+	fhp->fh_pre_change = nfsd4_change_attribute(&stat, inode);
+	fhp->fh_pre_saved = true;
+}
+
+/*
  * Fill in the post_op attr for the wcc data
  */
 void fill_post_wcc(struct svc_fh *fhp)
@@ -261,7 +289,8 @@ void fill_post_wcc(struct svc_fh *fhp)
 		printk("nfsd: inode locked twice during operation.\n");
 
 	err = fh_getattr(fhp, &fhp->fh_post_attr);
-	fhp->fh_post_change = nfsd4_change_attribute(d_inode(fhp->fh_dentry));
+	fhp->fh_post_change = nfsd4_change_attribute(&fhp->fh_post_attr,
+						     d_inode(fhp->fh_dentry));
 	if (err) {
 		fhp->fh_post_saved = false;
 		/* Grab the ctime anyway - set_change_info might use it */
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 008ea0b627d0..a0bed2b2004d 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1363,14 +1363,14 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
 	const struct nfsd4_layout_ops *ops;
 	struct nfs4_layout_stateid *ls;
 	__be32 nfserr;
-	int accmode;
+	int accmode = NFSD_MAY_READ_IF_EXEC;
 
 	switch (lgp->lg_seg.iomode) {
 	case IOMODE_READ:
-		accmode = NFSD_MAY_READ;
+		accmode |= NFSD_MAY_READ;
 		break;
 	case IOMODE_RW:
-		accmode = NFSD_MAY_READ | NFSD_MAY_WRITE;
+		accmode |= NFSD_MAY_READ | NFSD_MAY_WRITE;
 		break;
 	default:
 		dprintk("%s: invalid iomode %d\n",
@@ -1703,6 +1703,9 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
 	status = nfserr_minor_vers_mismatch;
 	if (nfsd_minorversion(args->minorversion, NFSD_TEST) <= 0)
 		goto out;
+	status = nfserr_resource;
+	if (args->opcnt > NFSD_MAX_OPS_PER_COMPOUND)
+		goto out;
 
 	status = nfs41_check_op_ordering(args);
 	if (status) {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index b29b5a185a2c..150521c9671b 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3590,6 +3590,7 @@ nfsd4_verify_open_stid(struct nfs4_stid *s)
 	switch (s->sc_type) {
 	default:
 		break;
+	case 0:
 	case NFS4_CLOSED_STID:
 	case NFS4_CLOSED_DELEG_STID:
 		ret = nfserr_bad_stateid;
@@ -5182,7 +5183,6 @@ nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
 			    lockowner(stp->st_stateowner)))
 		goto out;
 
-	stp->st_stid.sc_type = NFS4_CLOSED_STID;
 	release_lock_stateid(stp);
 	ret = nfs_ok;
 
@@ -6078,10 +6078,8 @@ out:
 		 * If this is a new, never-before-used stateid, and we are
 		 * returning an error, then just go ahead and release it.
 		 */
-		if (status && new) {
-			lock_stp->st_stid.sc_type = NFS4_CLOSED_STID;
+		if (status && new)
 			release_lock_stateid(lock_stp);
-		}
 
 		mutex_unlock(&lock_stp->st_mutex);
 
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 2c61c6b8ae09..e502fd16246b 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -455,8 +455,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
 	}
 
 	label->len = 0;
-#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
-	if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
+	if (IS_ENABLED(CONFIG_NFSD_V4_SECURITY_LABEL) &&
+	    bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
 		READ_BUF(4);
 		len += 4;
 		dummy32 = be32_to_cpup(p++); /* lfs: we don't use it */
@@ -476,7 +476,6 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
 		if (!label->data)
 			return nfserr_jukebox;
 	}
-#endif
 	if (bmval[2] & FATTR4_WORD2_MODE_UMASK) {
 		if (!umask)
 			goto xdr_error;
@@ -1918,8 +1917,13 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
 
 	if (argp->taglen > NFSD4_MAX_TAGLEN)
 		goto xdr_error;
-	if (argp->opcnt > 100)
-		goto xdr_error;
+	/*
+	 * NFS4ERR_RESOURCE is a more helpful error than GARBAGE_ARGS
+	 * here, so we return success at the xdr level so that
+	 * nfsd4_proc can handle this is an NFS-level error.
+	 */
+	if (argp->opcnt > NFSD_MAX_OPS_PER_COMPOUND)
+		return 0;
 
 	if (argp->opcnt > ARRAY_SIZE(argp->iops)) {
 		argp->ops = kzalloc(argp->opcnt * sizeof(*argp->ops), GFP_KERNEL);
@@ -1991,7 +1995,7 @@ static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
 		*p++ = cpu_to_be32(convert_to_wallclock(exp->cd->flush_time));
 		*p++ = 0;
 	} else if (IS_I_VERSION(inode)) {
-		p = xdr_encode_hyper(p, nfsd4_change_attribute(inode));
+		p = xdr_encode_hyper(p, nfsd4_change_attribute(stat, inode));
 	} else {
 		*p++ = cpu_to_be32(stat->ctime.tv_sec);
 		*p++ = cpu_to_be32(stat->ctime.tv_nsec);
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index b8444189223b..755e256a9103 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -253,36 +253,20 @@ fh_clear_wcc(struct svc_fh *fhp)
  * By using both ctime and the i_version counter we guarantee that as
  * long as time doesn't go backwards we never reuse an old value.
  */
-static inline u64 nfsd4_change_attribute(struct inode *inode)
+static inline u64 nfsd4_change_attribute(struct kstat *stat,
+					 struct inode *inode)
 {
 	u64 chattr;
 
-	chattr =  inode->i_ctime.tv_sec;
+	chattr =  stat->ctime.tv_sec;
 	chattr <<= 30;
-	chattr += inode->i_ctime.tv_nsec;
+	chattr += stat->ctime.tv_nsec;
 	chattr += inode_query_iversion(inode);
 	return chattr;
 }
 
-/*
- * Fill in the pre_op attr for the wcc data
- */
-static inline void
-fill_pre_wcc(struct svc_fh *fhp)
-{
-	struct inode    *inode;
-
-	inode = d_inode(fhp->fh_dentry);
-	if (!fhp->fh_pre_saved) {
-		fhp->fh_pre_mtime = inode->i_mtime;
-		fhp->fh_pre_ctime = inode->i_ctime;
-		fhp->fh_pre_size  = inode->i_size;
-		fhp->fh_pre_change = nfsd4_change_attribute(inode);
-		fhp->fh_pre_saved = true;
-	}
-}
-
-extern void fill_post_wcc(struct svc_fh *);
+extern void fill_pre_wcc(struct svc_fh *fhp);
+extern void fill_post_wcc(struct svc_fh *fhp);
 #else
 #define fh_clear_wcc(ignored)
 #define fill_pre_wcc(ignored)
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 644a0342f0e0..79b6064f8977 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -188,6 +188,7 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
 	*p++ = htonl((u32) stat->ino);
 	*p++ = htonl((u32) stat->atime.tv_sec);
 	*p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0);
+	time = stat->mtime;
 	lease_get_mtime(d_inode(dentry), &time);
 	*p++ = htonl((u32) time.tv_sec);
 	*p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0);
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 995c6fe9ee90..4b731b046bcd 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -185,8 +185,6 @@ extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *);
 extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *);
 extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *);
 extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
-extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
-extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t);
 extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
 extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
 extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 5570719e4787..943f2a745cd5 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -384,25 +384,11 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
 static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
 				unsigned int rcv)
 {
-#if 0
-	mm_segment_t	oldfs;
-	oldfs = get_fs(); set_fs(KERNEL_DS);
-	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
-			(char*)&snd, sizeof(snd));
-	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
-			(char*)&rcv, sizeof(rcv));
-#else
-	/* sock_setsockopt limits use to sysctl_?mem_max,
-	 * which isn't acceptable.  Until that is made conditional
-	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
-	 * DaveM said I could!
-	 */
 	lock_sock(sock->sk);
 	sock->sk->sk_sndbuf = snd * 2;
 	sock->sk->sk_rcvbuf = rcv * 2;
 	sock->sk->sk_write_space(sock->sk);
 	release_sock(sock->sk);
-#endif
 }
 
 static int svc_sock_secure_port(struct svc_rqst *rqstp)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index af7893501e40..a73632ca9048 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -95,7 +95,6 @@ out_shortreply:
 out_notfound:
 	dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n",
 		xprt, be32_to_cpu(xid));
-
 	goto out_unlock;
 }
 
@@ -129,10 +128,6 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
 	if (ret < 0)
 		goto out_err;
 
-	ret = svc_rdma_repost_recv(rdma, GFP_NOIO);
-	if (ret)
-		goto out_err;
-
 	/* Bump page refcnt so Send completion doesn't release
 	 * the rq_buffer before all retransmits are complete.
 	 */
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index ad4bd62eebf1..19e9c6b33042 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -400,10 +400,6 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
 	struct page *page;
 	int ret;
 
-	ret = svc_rdma_repost_recv(xprt, GFP_KERNEL);
-	if (ret)
-		return;
-
 	page = alloc_page(GFP_KERNEL);
 	if (!page)
 		return;
@@ -554,8 +550,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 		ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
 					       &rqstp->rq_arg);
 		svc_rdma_put_context(ctxt, 0);
-		if (ret)
-			goto repost;
 		return ret;
 	}
 
@@ -590,6 +584,5 @@ out_postfail:
 
 out_drop:
 	svc_rdma_put_context(ctxt, 1);
-repost:
-	return svc_rdma_repost_recv(rdma_xprt, GFP_KERNEL);
+	return 0;
 }
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 9bd04549a1ad..12b9a7e0b6d2 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -727,12 +727,16 @@ static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
 		head->arg.head[0].iov_len - info->ri_position;
 	head->arg.head[0].iov_len = info->ri_position;
 
-	/* Read chunk may need XDR roundup (see RFC 5666, s. 3.7).
+	/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
 	 *
-	 * NFSv2/3 write decoders need the length of the tail to
-	 * contain the size of the roundup padding.
+	 * If the client already rounded up the chunk length, the
+	 * length does not change. Otherwise, the length of the page
+	 * list is increased to include XDR round-up.
+	 *
+	 * Currently these chunks always start at page offset 0,
+	 * thus the rounded-up length never crosses a page boundary.
 	 */
-	head->arg.tail[0].iov_len += 4 - (info->ri_chunklen & 3);
+	info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;
 
 	head->arg.page_len = info->ri_chunklen;
 	head->arg.len += info->ri_chunklen;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 7c3a211e0e9a..649441d5087d 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -674,9 +674,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 		svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret);
 	}
 
-	ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
-	if (ret)
-		goto err1;
 	ret = svc_rdma_send_reply_msg(rdma, rdma_argp, rdma_resp, rqstp,
 				      wr_lst, rp_ch);
 	if (ret < 0)
@@ -687,9 +684,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	if (ret != -E2BIG && ret != -EINVAL)
 		goto err1;
 
-	ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
-	if (ret)
-		goto err1;
 	ret = svc_rdma_send_error_msg(rdma, rdma_resp, rqstp);
 	if (ret < 0)
 		goto err0;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 46ec069150d5..9ad12a215b51 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -58,6 +58,7 @@
 
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 
+static int svc_rdma_post_recv(struct svcxprt_rdma *xprt);
 static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int);
 static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
 					struct net *net,
@@ -320,6 +321,8 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 	list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q);
 	spin_unlock(&xprt->sc_rq_dto_lock);
 
+	svc_rdma_post_recv(xprt);
+
 	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
 	if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
 		goto out;
@@ -404,7 +407,8 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
 	return cma_xprt;
 }
 
-int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
+static int
+svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 {
 	struct ib_recv_wr recv_wr, *bad_recv_wr;
 	struct svc_rdma_op_ctxt *ctxt;
@@ -423,7 +427,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
 			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
 			goto err_put_ctxt;
 		}
-		page = alloc_page(flags);
+		page = alloc_page(GFP_KERNEL);
 		if (!page)
 			goto err_put_ctxt;
 		ctxt->pages[sge_no] = page;
@@ -459,21 +463,6 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
 	return -ENOMEM;
 }
 
-int svc_rdma_repost_recv(struct svcxprt_rdma *xprt, gfp_t flags)
-{
-	int ret = 0;
-
-	ret = svc_rdma_post_recv(xprt, flags);
-	if (ret) {
-		pr_err("svcrdma: could not post a receive buffer, err=%d.\n",
-		       ret);
-		pr_err("svcrdma: closing transport %p.\n", xprt);
-		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
-		ret = -ENOTCONN;
-	}
-	return ret;
-}
-
 static void
 svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
 			       struct rdma_conn_param *param)
@@ -833,7 +822,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 
 	/* Post receive buffers */
 	for (i = 0; i < newxprt->sc_max_requests; i++) {
-		ret = svc_rdma_post_recv(newxprt);
+		ret = svc_rdma_post_recv(newxprt);
 		if (ret) {
 			dprintk("svcrdma: failure posting receive buffers\n");
 			goto errout;
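
A note on the change-attribute scheme these patches switch to kstat-based inputs: the value packs ctime seconds (shifted left by 30 bits), ctime nanoseconds, and the inode's i_version counter into a single 64-bit number, so the result only moves forward as long as ctime does not go backwards. The stand-alone sketch below is illustrative only — plain userspace C with a hypothetical change_attribute() helper standing in for the kernel's nfsd4_change_attribute(), taking plain integers instead of kstat/inode — but it performs the same packing.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: mirrors the packing done by nfsd4_change_attribute()
 * in this series, but on plain integers. ctime seconds land in the high
 * bits, then nanoseconds and the i_version counter are added on top.
 */
static uint64_t change_attribute(int64_t ctime_sec, uint32_t ctime_nsec,
				 uint64_t i_version)
{
	uint64_t chattr;

	chattr = (uint64_t)ctime_sec;
	chattr <<= 30;
	chattr += ctime_nsec;
	chattr += i_version;
	return chattr;
}

int main(void)
{
	/* Example values; any ctime/i_version pair behaves the same way. */
	uint64_t before = change_attribute(1514764800, 123456789, 41);
	uint64_t after  = change_attribute(1514764800, 123456789, 42);

	/* Bumping i_version alone already yields a new change attribute. */
	printf("before=%llu after=%llu\n",
	       (unsigned long long)before, (unsigned long long)after);
	return 0;
}

With these example values the two results differ by exactly the i_version bump, which is what lets an NFSv4 client detect a change even when ctime has not ticked.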
