path: root/net/core/dev.c
author		Jiri Kosina <jkosina@suse.com>	2025-03-26 13:42:07 +0100
committer	Jiri Kosina <jkosina@suse.com>	2025-03-26 13:42:07 +0100
commit		b3cc7428a32202936904b5b07cf9f135025bafd6 (patch)
tree		d4a1a6180ac5939fccd92acd6f8d7d1388575c4a /net/core/dev.c
parent		db52926fb0be40e1d588a346df73f5ea3a34a4c6 (diff)
parent		01601fdd40ecf4467c8ae4d215dbb7d2a0599a2c (diff)
Merge branch 'for-6.15/amd_sfh' into for-linus
From: Mario Limonciello <mario.limonciello@amd.com>

Some platforms include a human presence detection (HPD) sensor. When it is enabled and a user is detected, the sensor fusion hub emits a wake event that software can react to. Example use cases are "wake from suspend on approach" or "lock when leaving".

This is currently enabled by default on supported systems, but users can't control it. That effectively means wake-on-approach is always active, which is surprising behavior for users who don't expect it. Instead of defaulting to enabled, add a sysfs knob that users can use to enable the feature if desired, and set it to disabled by default.
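For illustration only, here is a minimal sketch of the kind of boolean sysfs knob described above, defaulting to disabled; the attribute name (hpd) and the static flag are placeholders, not the actual amd_sfh implementation, which would keep this state in driver private data and program the sensor fusion hub on change:

#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/sysfs.h>

/* Placeholder state; disabled by default so wake-on-approach is opt-in. */
static bool hpd_enabled;

static ssize_t hpd_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%u\n", hpd_enabled);
}

static ssize_t hpd_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	bool val;

	if (kstrtobool(buf, &val))
		return -EINVAL;

	hpd_enabled = val;	/* a real driver would reprogram the hub here */
	return count;
}
static DEVICE_ATTR_RW(hpd);

With such an attribute, writing 1 to the file under the device's sysfs directory would opt in, and the default remains 0 (disabled).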
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	53
1 file changed, 22 insertions(+), 31 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index afa2282f26043..b91658e8aedb4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6708,7 +6708,7 @@ void napi_resume_irqs(unsigned int napi_id)
 static void __napi_hash_add_with_id(struct napi_struct *napi,
 				    unsigned int napi_id)
 {
-	napi->napi_id = napi_id;
+	WRITE_ONCE(napi->napi_id, napi_id);
 	hlist_add_head_rcu(&napi->napi_hash_node,
 			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
 }
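The WRITE_ONCE() above pairs with lockless readers that look up NAPI instances under RCU. The following is a sketch of such a reader, modeled on napi_by_id() but not a verbatim copy, assuming the dev.c context (napi_hash, HASH_SIZE) and that napi->napi_id is read without napi_hash_lock:

static struct napi_struct *napi_by_id_sketch(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	/* RCU walk: napi_id may be published concurrently, so pair the
	 * WRITE_ONCE() in __napi_hash_add_with_id() with READ_ONCE() here.
	 */
	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (READ_ONCE(napi->napi_id) == napi_id)
			return napi;

	return NULL;
}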
@@ -9924,6 +9924,10 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
 			NL_SET_ERR_MSG(extack, "Program bound to different device");
 			return -EINVAL;
 		}
+		if (bpf_prog_is_dev_bound(new_prog->aux) && mode == XDP_MODE_SKB) {
+			NL_SET_ERR_MSG(extack, "Can't attach device-bound programs in generic mode");
+			return -EINVAL;
+		}
 		if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
 			NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
 			return -EINVAL;
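For context, "generic mode" here is XDP_MODE_SKB, the software path that runs after skb allocation; a device-bound (offloaded) program cannot run there. The sketch below shows, in simplified form, how the attach mode is derived from the netlink flags; it is modeled on dev_xdp_mode() with details abridged, and the function name is illustrative:

static enum bpf_xdp_mode xdp_mode_sketch(struct net_device *dev, u32 flags)
{
	if (flags & XDP_FLAGS_HW_MODE)
		return XDP_MODE_HW;	/* fully offloaded to the device */
	if (flags & XDP_FLAGS_DRV_MODE)
		return XDP_MODE_DRV;	/* native driver hook */
	if (flags & XDP_FLAGS_SKB_MODE)
		return XDP_MODE_SKB;	/* generic path, after skb allocation */
	/* No explicit flag: prefer native if the driver implements ndo_bpf. */
	return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
}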
@@ -10260,37 +10264,14 @@ static bool from_cleanup_net(void)
 #endif
 }
 
-static void rtnl_drop_if_cleanup_net(void)
-{
-	if (from_cleanup_net())
-		__rtnl_unlock();
-}
-
-static void rtnl_acquire_if_cleanup_net(void)
-{
-	if (from_cleanup_net())
-		rtnl_lock();
-}
-
 /* Delayed registration/unregisteration */
 LIST_HEAD(net_todo_list);
-static LIST_HEAD(net_todo_list_for_cleanup_net);
-
-/* TODO: net_todo_list/net_todo_list_for_cleanup_net should probably
- * be provided by callers, instead of being static, rtnl protected.
- */
-static struct list_head *todo_list(void)
-{
-	return from_cleanup_net() ? &net_todo_list_for_cleanup_net :
-				    &net_todo_list;
-}
-
 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 atomic_t dev_unreg_count = ATOMIC_INIT(0);
 
 static void net_set_todo(struct net_device *dev)
 {
-	list_add_tail(&dev->todo_list, todo_list());
+	list_add_tail(&dev->todo_list, &net_todo_list);
 }
 
 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
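For reference, only the tail of from_cleanup_net() appears in the hunk header above. A sketch of how that check reads upstream around this series, assuming the cleanup_net() worker publishes its task_struct in a cleanup_net_task pointer, shown here purely as context for what the removed rtnl_drop/acquire helpers and the cleanup_net todo list were keying on:

static bool from_cleanup_net(void)
{
#ifdef CONFIG_NET_NS
	/* cleanup_net_task is recorded by cleanup_net() while it runs. */
	return current == cleanup_net_task;
#else
	return false;
#endif
}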
@@ -11140,7 +11121,7 @@ void netdev_run_todo(void)
 #endif
 
 	/* Snapshot list, allow later requests */
-	list_replace_init(todo_list(), &list);
+	list_replace_init(&net_todo_list, &list);
 
 	__rtnl_unlock();
 
@@ -11305,6 +11286,20 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 	const struct net_device_ops *ops = dev->netdev_ops;
 	const struct net_device_core_stats __percpu *p;
 
+	/*
+	 * IPv{4,6} and udp tunnels share common stat helpers and use
+	 * different stat type (NETDEV_PCPU_STAT_TSTATS vs
+	 * NETDEV_PCPU_STAT_DSTATS). Ensure the accounting is consistent.
+	 */
+	BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, rx_bytes) !=
+		     offsetof(struct pcpu_dstats, rx_bytes));
+	BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, rx_packets) !=
+		     offsetof(struct pcpu_dstats, rx_packets));
+	BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, tx_bytes) !=
+		     offsetof(struct pcpu_dstats, tx_bytes));
+	BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, tx_packets) !=
+		     offsetof(struct pcpu_dstats, tx_packets));
+
 	if (ops->ndo_get_stats64) {
 		memset(storage, 0, sizeof(*storage));
 		ops->ndo_get_stats64(dev, storage);
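As a sketch of what the BUILD_BUG_ON()s above are guarding, assume field layouts along the following lines (abridged and illustrative; the authoritative definitions live in include/linux/netdevice.h): the shared rx/tx byte and packet counters must sit at identical offsets so tunnel code using the common stat helpers accounts correctly whether the device registered TSTATS or DSTATS.

#include <linux/u64_stats_sync.h>

/* Abridged, illustrative layouts, not the real definitions. */
struct pcpu_sw_netstats_sketch {		/* NETDEV_PCPU_STAT_TSTATS */
	u64_stats_t		rx_packets;
	u64_stats_t		rx_bytes;
	u64_stats_t		tx_packets;
	u64_stats_t		tx_bytes;
	struct u64_stats_sync	syncp;
};

struct pcpu_dstats_sketch {			/* NETDEV_PCPU_STAT_DSTATS */
	u64_stats_t		rx_packets;	/* same offsets as above... */
	u64_stats_t		rx_bytes;
	u64_stats_t		tx_packets;
	u64_stats_t		tx_bytes;
	u64_stats_t		rx_drops;	/* ...extra drop counters after */
	u64_stats_t		tx_drops;
	struct u64_stats_sync	syncp;
};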
@@ -11785,11 +11780,9 @@ void unregister_netdevice_many_notify(struct list_head *head,
 		WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
 		netdev_unlock(dev);
 	}
-
-	rtnl_drop_if_cleanup_net();
 	flush_all_backlogs();
+
 	synchronize_net();
-	rtnl_acquire_if_cleanup_net();
 
 	list_for_each_entry(dev, head, unreg_list) {
 		struct sk_buff *skb = NULL;
@@ -11849,9 +11842,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
 #endif
 	}
 
-	rtnl_drop_if_cleanup_net();
 	synchronize_net();
-	rtnl_acquire_if_cleanup_net();
 
 	list_for_each_entry(dev, head, unreg_list) {
 		netdev_put(dev, &dev->dev_registered_tracker);