author    Chuck Lever <chuck.lever@oracle.com>	2025-03-01 13:31:50 -0500
committer Chuck Lever <chuck.lever@oracle.com>	2025-05-11 19:48:21 -0400
commit    281ae67c948b4e6c1f93fe0ce3d2aa77e9b59047
tree      d008f13cd42c5ba2aecb861123c66a22e0e52b48
parent    4f3c8d8c9e109655311bbb4c6c46a54a9ef90d99
NFSD: Implement CB_SEQUENCE referring call lists
The slot index number of the current COMPOUND has, until now, not
been needed outside of nfsd4_sequence(). But to record the tuple
that represents a referring call, the slot number will be needed
when processing subsequent operations in the COMPOUND.

Refactor the code that allocates a new struct nfsd4_slot to ensure
that the new sl_index field is always correctly initialized.

Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
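[Editor's note] The refactored helper below, nfsd4_alloc_slot(), relies on
C's flexible-array-member pattern: struct_size() computes the size of the
fixed struct header plus the variable sl_data[] payload in one
overflow-checked expression, so header and reply-cache buffer come from a
single allocation. A minimal userspace analogue of that pattern, in plain C
with calloc() standing in for kzalloc(); the struct and names here are
illustrative, not the kernel's:

	#include <stdio.h>
	#include <stdlib.h>

	/* Toy stand-in for struct nfsd4_slot: a fixed header followed
	 * by a flexible array member holding the cached reply data. */
	struct demo_slot {
		unsigned int index;	/* stand-in for sl_index */
		size_t datalen;		/* stand-in for sl_datalen */
		char data[];		/* stand-in for sl_data[] */
	};

	/* Allocate a slot with room for 'size' bytes of cached data,
	 * mirroring kzalloc(struct_size(slot, sl_data, size), gfp),
	 * and initialize the index field unconditionally, as the
	 * patch does. */
	static struct demo_slot *alloc_slot(unsigned int index, size_t size)
	{
		struct demo_slot *slot = calloc(1, sizeof(*slot) + size);

		if (!slot)
			return NULL;
		slot->index = index;
		slot->datalen = size;
		return slot;
	}

	int main(void)
	{
		struct demo_slot *slot = alloc_slot(3, 128);

		if (!slot)
			return 1;
		printf("slot %u, room for %zu bytes\n",
		       slot->index, slot->datalen);
		free(slot);
		return 0;
	}

Folding the index assignment into the allocator is what guarantees the new
field can never be left uninitialized by any of the three call sites below.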
 fs/nfsd/nfs4state.c | 38
 fs/nfsd/state.h     |  1
 2 files changed, 22 insertions(+), 17 deletions(-)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 59a693f22452..5f5bed2a0312 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1987,26 +1987,30 @@ reduce_session_slots(struct nfsd4_session *ses, int dec)
 	return ret;
 }
 
-/*
- * We don't actually need to cache the rpc and session headers, so we
- * can allocate a little less for each slot:
- */
-static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
+static struct nfsd4_slot *nfsd4_alloc_slot(struct nfsd4_channel_attrs *fattrs,
+					   int index, gfp_t gfp)
 {
-	u32 size;
+	struct nfsd4_slot *slot;
+	size_t size;
 
-	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
-		size = 0;
-	else
-		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
-	return size + sizeof(struct nfsd4_slot);
+	/*
+	 * The RPC and NFS session headers are never saved in
+	 * the slot reply cache buffer.
+	 */
+	size = fattrs->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ ?
+		0 : fattrs->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
+
+	slot = kzalloc(struct_size(slot, sl_data, size), gfp);
+	if (!slot)
+		return NULL;
+	slot->sl_index = index;
+	return slot;
 }
 
 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
 					   struct nfsd4_channel_attrs *battrs)
 {
 	int numslots = fattrs->maxreqs;
-	int slotsize = slot_bytes(fattrs);
 	struct nfsd4_session *new;
 	struct nfsd4_slot *slot;
 	int i;
@@ -2015,14 +2019,14 @@ static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
 	if (!new)
 		return NULL;
 	xa_init(&new->se_slots);
-	/* allocate each struct nfsd4_slot and data cache in one piece */
-	slot = kzalloc(slotsize, GFP_KERNEL);
+
+	slot = nfsd4_alloc_slot(fattrs, 0, GFP_KERNEL);
 	if (!slot || xa_is_err(xa_store(&new->se_slots, 0, slot, GFP_KERNEL)))
 		goto out_free;
 	for (i = 1; i < numslots; i++) {
 		const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
 
-		slot = kzalloc(slotsize, gfp);
+		slot = nfsd4_alloc_slot(fattrs, i, gfp);
 		if (!slot)
 			break;
 		if (xa_is_err(xa_store(&new->se_slots, i, slot, gfp))) {
@@ -4438,8 +4442,8 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 			 * spinlock, and only succeeds if there is
 			 * plenty of memory.
 			 */
-			slot = kzalloc(slot_bytes(&session->se_fchannel),
-				       GFP_NOWAIT);
+			slot = nfsd4_alloc_slot(&session->se_fchannel, s,
+						GFP_NOWAIT);
 			prev_slot = xa_load(&session->se_slots, s);
 			if (xa_is_value(prev_slot) && slot) {
 				slot->sl_seqid = xa_to_value(prev_slot);
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 7d5758faef6a..1995bca158b8 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -278,6 +278,7 @@ struct nfsd4_slot {
 	u32	sl_seqid;
 	__be32	sl_status;
 	struct svc_cred	sl_cred;
+	u32	sl_index;
 	u32	sl_datalen;
 	u16	sl_opcnt;
 	u16	sl_generation;
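
[Editor's note] To see what sl_index buys, here is a hypothetical sketch of
the consumer this patch prepares for. In NFSv4.1 (RFC 8881, Section
2.10.6.3), a CB_SEQUENCE referring call list names earlier calls by a
(sessionid, sequenceid, slotid) triple; once the slot records its own index,
any operation in the COMPOUND can produce that triple from the slot alone.
The type and function names below are illustrative only, not the ones the
follow-up patches in this series actually define:

	/*
	 * Illustrative only: the per-call part of a referring call
	 * list entry (the session ID is carried once per list).
	 */
	struct referring_call {
		u32 rc_sequenceid;
		u32 rc_slotid;
	};

	/*
	 * With sl_index initialized by nfsd4_alloc_slot(), the slot
	 * itself carries everything needed to name the referring
	 * call; callers no longer depend on the index value that was
	 * previously private to nfsd4_sequence().
	 */
	static void note_referring_call(const struct nfsd4_slot *slot,
					struct referring_call *rc)
	{
		rc->rc_sequenceid = slot->sl_seqid;
		rc->rc_slotid = slot->sl_index;
	}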