git.hungrycats.org Git - linux/commitdiff
NFSD: Limit the number of concurrent async COPY operations
author: Chuck Lever <chuck.lever@oracle.com>
Wed, 28 Aug 2024 17:40:04 +0000 (13:40 -0400)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 10 Oct 2024 10:01:11 +0000 (12:01 +0200)
[ Upstream commit aadc3bbea163b6caaaebfdd2b6c4667fbc726752 ]

Nothing appears to limit the number of concurrent async COPY
operations that clients can start. In addition, AFAICT each async
COPY can copy an unlimited number of 4MB chunks, so can run for a
long time. Thus IMO async COPY can become a DoS vector.

Add a restriction mechanism that bounds the number of concurrent
background COPY operations. Start simple and try to be fair -- this
patch implements a per-namespace limit.

An async COPY request that occurs while this limit is exceeded gets
NFS4ERR_DELAY. The requesting client can choose to send the request
again after a delay or fall back to a traditional read/write style
copy.

If there is need to make the mechanism more sophisticated, we can
visit that in future patches.

Cc: stable@vger.kernel.org
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
fs/nfsd/netns.h
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4state.c
fs/nfsd/xdr4.h

index 14ec15656320903b2bdc9b2426b462498050e7d1..5cae26917436c0d48d37ba904e2cdc1bc83b706e 100644 (file)
@@ -148,6 +148,7 @@ struct nfsd_net {
        u32             s2s_cp_cl_id;
        struct idr      s2s_cp_stateids;
        spinlock_t      s2s_cp_lock;
+       atomic_t        pending_async_copies;
 
        /*
         * Version information
index 60c526adc27c62d91ff7fa90d3c4f041bfec3cf1..5768b2ff1d1d13b98aa8d42ef57bdb081b2284a3 100644 (file)
@@ -1279,6 +1279,7 @@ static void nfs4_put_copy(struct nfsd4_copy *copy)
 {
        if (!refcount_dec_and_test(&copy->refcount))
                return;
+       atomic_dec(&copy->cp_nn->pending_async_copies);
        kfree(copy->cp_src);
        kfree(copy);
 }
@@ -1833,10 +1834,16 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        memcpy(&copy->fh, &cstate->current_fh.fh_handle,
                sizeof(struct knfsd_fh));
        if (nfsd4_copy_is_async(copy)) {
-               status = nfserrno(-ENOMEM);
                async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
                if (!async_copy)
                        goto out_err;
+               async_copy->cp_nn = nn;
+               /* Arbitrary cap on number of pending async copy operations */
+               if (atomic_inc_return(&nn->pending_async_copies) >
+                               (int)rqstp->rq_pool->sp_nrthreads) {
+                       atomic_dec(&nn->pending_async_copies);
+                       goto out_err;
+               }
                INIT_LIST_HEAD(&async_copy->copies);
                refcount_set(&async_copy->refcount, 1);
                async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
@@ -1876,7 +1883,7 @@ out_err:
        }
        if (async_copy)
                cleanup_async_copy(async_copy);
-       status = nfserrno(-ENOMEM);
+       status = nfserr_jukebox;
        goto out;
 }
 
index f4eae4b65572a11691f821d4cc13507200bc9562..3837f4e417247e33354c27cad1d9de4699921fd0 100644 (file)
@@ -8575,6 +8575,7 @@ static int nfs4_state_create_net(struct net *net)
        spin_lock_init(&nn->client_lock);
        spin_lock_init(&nn->s2s_cp_lock);
        idr_init(&nn->s2s_cp_stateids);
+       atomic_set(&nn->pending_async_copies, 0);
 
        spin_lock_init(&nn->blocked_locks_lock);
        INIT_LIST_HEAD(&nn->blocked_locks_lru);
index fbdd42cde1fa5bc6ed7df1aec685dd111b50263c..2a21a7662e030cbd0f54060edd2b91e4d8c86d7e 100644 (file)
@@ -713,6 +713,7 @@ struct nfsd4_copy {
        struct nfsd4_ssc_umount_item *ss_nsui;
        struct nfs_fh           c_fh;
        nfs4_stateid            stateid;
+       struct nfsd_net         *cp_nn;
 };
 
 static inline void nfsd4_copy_set_sync(struct nfsd4_copy *copy, bool sync)