| author | Jakub Kicinski <kuba@kernel.org> | 2024-02-07 18:55:15 -0800 |
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2024-02-07 18:55:16 -0800 |
| commit | b6b614558ed5b2ca50edacc0f2fbf5f52158c86c (patch) | |
| tree | f7dcd82263393d8c8dee032a7f6926cd43f9a045 /net/xfrm/xfrm_interface_core.c | |
| parent | a1e55f51035e6aa65cf2d11d2147f2bf9edf81f9 (diff) | |
| parent | 8962daccc2d32812fe24bd21496c036eb4f454b0 (diff) | |
Merge branch 'net-more-factorization-in-cleanup_net-paths'
Eric Dumazet says:
====================
net: more factorization in cleanup_net() paths
This series is inspired by recent syzbot reports hinting at RTNL and
workqueue abuses.
rtnl_lock() is unfair to the (single-threaded) cleanup_net(), because
many threads can cause contention on it.
This series adds a new (struct pernet_operations) method,
so that cleanup_net() can hold RTNL longer once it finally
acquires it (the resulting calling pattern is sketched below).
It also factorizes unregister_netdevice_many(), to further
reduce stalls in cleanup_net().
Link: https://lore.kernel.org/netdev/CANn89iLJrrJs+6Vc==Un4rVKcpV0Eof4F_4w1_wQGxUCE2FWAg@mail.gmail.com/T/#u
https://lore.kernel.org/netdev/170688415193.5216.10499830272732622816@kwain/
====================
Link: https://lore.kernel.org/r/20240206144313.2050392-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
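For readers unfamiliar with the new hook: the idea behind ->exit_batch_rtnl() is that the namespace teardown core can take RTNL once per batch of dying namespaces, let every registered pernet_operations queue its doomed devices onto a shared list, and then unregister them all with a single unregister_netdevice_many() call. The sketch below only illustrates that calling pattern; the helper name and locals are hypothetical, and the real cleanup_net() code in net/core/net_namespace.c may be structured differently.

/* Illustrative sketch of the pattern enabled by ->exit_batch_rtnl().
 * sketch_run_exit_batch_rtnl() and dev_kill_list are made-up names;
 * this is not the actual cleanup_net() implementation.
 */
static void sketch_run_exit_batch_rtnl(struct list_head *net_exit_list)
{
	const struct pernet_operations *ops;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();	/* taken once for the whole batch of namespaces */
	list_for_each_entry_reverse(ops, &pernet_list, list) {
		if (ops->exit_batch_rtnl)
			/* each subsystem only queues its dying devices */
			ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
	}
	/* one batched unregister for every queued device, still under RTNL */
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}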
Diffstat (limited to 'net/xfrm/xfrm_interface_core.c')
-rw-r--r-- | net/xfrm/xfrm_interface_core.c | 14 |
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
index 21d50d75c260..dafefef3cf51 100644
--- a/net/xfrm/xfrm_interface_core.c
+++ b/net/xfrm/xfrm_interface_core.c
@@ -957,12 +957,12 @@ static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
 	.get_link_net	= xfrmi_get_link_net,
 };
 
-static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
+static void __net_exit xfrmi_exit_batch_rtnl(struct list_head *net_exit_list,
+					     struct list_head *dev_to_kill)
 {
 	struct net *net;
-	LIST_HEAD(list);
 
-	rtnl_lock();
+	ASSERT_RTNL();
 	list_for_each_entry(net, net_exit_list, exit_list) {
 		struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
 		struct xfrm_if __rcu **xip;
@@ -973,18 +973,16 @@ static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
 			for (xip = &xfrmn->xfrmi[i];
 			     (xi = rtnl_dereference(*xip)) != NULL;
 			     xip = &xi->next)
-				unregister_netdevice_queue(xi->dev, &list);
+				unregister_netdevice_queue(xi->dev, dev_to_kill);
 		}
 		xi = rtnl_dereference(xfrmn->collect_md_xfrmi);
 		if (xi)
-			unregister_netdevice_queue(xi->dev, &list);
+			unregister_netdevice_queue(xi->dev, dev_to_kill);
 	}
-	unregister_netdevice_many(&list);
-	rtnl_unlock();
 }
 
 static struct pernet_operations xfrmi_net_ops = {
-	.exit_batch = xfrmi_exit_batch_net,
+	.exit_batch_rtnl = xfrmi_exit_batch_rtnl,
 	.id   = &xfrmi_net_id,
 	.size = sizeof(struct xfrmi_net),
 };
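The hunks above are the whole recipe for converting a per-netns virtual device driver to the new hook: take the shared dev_to_kill list as a second argument, replace the local rtnl_lock()/rtnl_unlock() pair with ASSERT_RTNL(), queue devices with unregister_netdevice_queue(), drop the driver-local unregister_netdevice_many() call, and register the function as .exit_batch_rtnl. A generic template follows; the "foo" driver, foo_net_id and the foo_net/foo_dev types are made up for illustration and only mirror the shape of the xfrmi conversion shown above.

/* Hypothetical driver "foo"; only the shape matters, it mirrors the
 * xfrmi conversion in the diff above.
 */
static void __net_exit foo_exit_batch_rtnl(struct list_head *net_exit_list,
					   struct list_head *dev_to_kill)
{
	struct net *net;

	ASSERT_RTNL();	/* the core already holds RTNL for the whole batch */
	list_for_each_entry(net, net_exit_list, exit_list) {
		struct foo_net *fn = net_generic(net, foo_net_id);
		struct foo_dev *fd;

		/* queue only: no rtnl_lock()/rtnl_unlock() and no
		 * unregister_netdevice_many() in the driver anymore
		 */
		list_for_each_entry(fd, &fn->dev_list, list)
			unregister_netdevice_queue(fd->dev, dev_to_kill);
	}
}

static struct pernet_operations foo_net_ops = {
	.exit_batch_rtnl = foo_exit_batch_rtnl,
	.id		 = &foo_net_id,
	.size		 = sizeof(struct foo_net),
};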